/*
 * Copyright (c) 2005-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey Hsu and Matthew Dillon.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sysmsg.h>
#include <sys/device.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/nlookup.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/event.h>
#include <sys/kern_syscall.h>
#include <sys/kcore.h>
#include <sys/kinfo.h>
#include <sys/objcache.h>

#include <vm/vm_extern.h>

#include <sys/file2.h>
#include <sys/spinlock2.h>
static int fdalloc_locked(struct proc *p, struct filedesc *fdp,
			int want, int *result);
static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd);
static void fdreserve_locked(struct filedesc *fdp, int fd0, int incr);
static struct file *funsetfd_locked(struct filedesc *fdp, int fd);
static void ffree(struct file *fp);

static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
		"file desc to leader structures");
static MALLOC_DEFINE_OBJ(M_FILE, sizeof(struct file),
		"file", "Open file structure");
static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
static struct krate krate_uidinfo = { .freq = 1 };

static d_open_t fdopen;

#define CDEV_MAJOR 22
static struct dev_ops fildesc_ops = {
	{ "FD", 0, 0 },
	.d_open =	fdopen,
};
/*
 * Descriptor management.
 */
#ifndef NFILELIST_HEADS
#define NFILELIST_HEADS		257	/* prime number */
#endif

struct filelist_head {
	struct spinlock		spin;
	struct filelist		list;
} __cachealign;

static struct filelist_head	filelist_heads[NFILELIST_HEADS];

static int nfiles;		/* actual number of open files */

struct lwkt_token revoke_token = LWKT_TOKEN_INITIALIZER(revoke_token);
/*
 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared.
 *
 * must be called with fdp->fd_spin exclusively held
 */
static __inline
void
fdfixup_locked(struct filedesc *fdp, int fd)
{
	if (fd < fdp->fd_freefile) {
		fdp->fd_freefile = fd;
	}
	while (fdp->fd_lastfile >= 0 &&
	       fdp->fd_files[fdp->fd_lastfile].fp == NULL &&
	       fdp->fd_files[fdp->fd_lastfile].reserved == 0) {
		--fdp->fd_lastfile;
	}
}
/*
 * Clear the fd thread caches for this fdnode.
 *
 * If match_fdc is NULL, all thread caches of fdn will be cleared.
 * The caller must hold fdp->fd_spin exclusively.  The threads caching
 * the descriptor do not have to be the current thread.  The (status)
 * argument is ignored.
 *
 * If match_fdc is not NULL, only the match_fdc's cache will be cleared.
 * The caller must hold fdp->fd_spin shared and match_fdc must match a
 * fdcache entry in curthread.  match_fdc has been locked by the caller
 * and had the specified (status).
 *
 * Since we are matching against a fp in the fdp (which must still be present
 * at this time), fp will have at least two refs on any match and we can
 * decrement the count trivially.
 */
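/*
 * Informal summary (editor's note, derived from the code below and from
 * _holdfp_cache()/dropfp()): fdc->locked acts as a tiny per-entry state
 * machine:
 *
 *	0	entry unlocked; if fdc->fp != NULL the cache entry itself
 *		holds a ref on the fp that a future holdfp() may borrow.
 *	1	entry transiently locked (by the owner or by a clearing
 *		thread); other threads spin or skip the entry.
 *	2	entry locked with its cached ref currently borrowed by a
 *		holdfp() caller; dropfp() returns it to state 0.
 */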
static
void
fclearcache(struct fdnode *fdn, struct fdcache *match_fdc, int status)
{
	struct fdcache *fdc;
	struct file *fp;
	int i;

	/*
	 * match_fdc == NULL	We are cleaning out all tdcache entries
	 *			for the fdn and hold fdp->fd_spin exclusively.
	 *			This can race against the target threads
	 *			cleaning out specific entries.
	 *
	 * match_fdc != NULL	We are cleaning out a specific tdcache
	 *			entry on behalf of the owning thread
	 *			and hold fdp->fd_spin shared.  The thread
	 *			has already locked the entry.  This cannot
	 *			race.
	 */
	fp = fdn->fp;
	for (i = 0; i < NTDCACHEFD; ++i) {
		if ((fdc = fdn->tdcache[i]) == NULL)
			continue;

		/*
		 * If match_fdc is non-NULL we are being asked to
		 * clear a specific fdc owned by curthread.  There must
		 * be exactly one match.  The caller has already locked
		 * the cache entry and will dispose of the lock after
		 * we return.
		 *
		 * Since we also have a shared lock on fdp, we
		 * can do this without atomic ops.
		 */
		if (match_fdc) {
			if (fdc != match_fdc)
				continue;
			fdn->tdcache[i] = NULL;
			KASSERT(fp == fdc->fp,
				("fclearcache(1): fp mismatch %p/%p\n",
				 fp, fdc->fp));
			fdc->fd = -1;
			fdc->fp = NULL;

			/*
			 * status can be 0 or 2.  If 2 the ref is borrowed,
			 * if 0 the ref is not borrowed and we have to drop
			 * it.
			 */
			if (status == 0)
				atomic_add_int(&fp->f_count, -1);
			fdn->isfull = 0;	/* heuristic */
			return;
		}

		/*
		 * Otherwise we hold an exclusive spin-lock and can only
		 * race thread consumers borrowing cache entries.
		 *
		 * Acquire the lock and dispose of the entry.  We have to
		 * spin until we get the lock.
		 */
		for (;;) {
			status = atomic_swap_int(&fdc->locked, 1);
			if (status == 1) {	/* foreign lock, retry */
				cpu_pause();
				continue;
			}
			fdn->tdcache[i] = NULL;
			KASSERT(fp == fdc->fp,
				("fclearcache(2): fp mismatch %p/%p\n",
				 fp, fdc->fp));
			fdc->fd = -1;
			fdc->fp = NULL;

			/*
			 * status can be 0 or 2.  If 2 the ref is borrowed,
			 * if 0 the ref is not borrowed and we have to drop
			 * it.
			 */
			if (status == 0)
				atomic_add_int(&fp->f_count, -1);
			fdn->isfull = 0;	/* heuristic */
			atomic_swap_int(&fdc->locked, 0);
			break;
		}
	}
	KKASSERT(match_fdc == NULL);
}
/*
 * Retrieve the fp for the specified fd given the specified file descriptor
 * table.  The fdp does not have to be owned by the current process.
 * If flag != -1, fp->f_flag must contain at least one of the flags.
 *
 * This function is not able to cache the fp.
 */
struct file *
holdfp_fdp(struct filedesc *fdp, int fd, int flag)
{
	struct file *fp;

	spin_lock_shared(&fdp->fd_spin);
	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			if ((fp->f_flag & flag) == 0 && flag != -1)
				fp = NULL;
			else
				fhold(fp);
		}
	} else {
		fp = NULL;
	}
	spin_unlock_shared(&fdp->fd_spin);

	return fp;
}
struct file *
holdfp_fdp_locked(struct filedesc *fdp, int fd, int flag)
{
	struct file *fp;

	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			if ((fp->f_flag & flag) == 0 && flag != -1)
				fp = NULL;
			else
				fhold(fp);
		}
	} else {
		fp = NULL;
	}
	return fp;
}
/*
 * Acquire the fp for the specified file descriptor, using the thread
 * cache if possible and caching it if possible.
 *
 * td must be the current thread.
 */
static
struct file *
_holdfp_cache(thread_t td, int fd)
{
	struct filedesc *fdp;
	struct fdcache *fdc;
	struct fdcache *best;
	struct fdnode *fdn;
	struct file *fp;
	int status;
	int delta;
	int i;

	/*
	 * Fast path: check the per-thread cache first.
	 */
	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
		if (fdc->fd != fd || fdc->fp == NULL)
			continue;
		status = atomic_swap_int(&fdc->locked, 1);

		/*
		 * If someone else has locked our cache entry they are in
		 * the middle of clearing it, skip the entry.
		 */
		if (status == 1)
			continue;

		/*
		 * We have locked the entry, but if it no longer matches
		 * restore the previous state (0 or 2) and skip the entry.
		 */
		if (fdc->fd != fd || fdc->fp == NULL) {
			atomic_swap_int(&fdc->locked, status);
			continue;
		}

		/*
		 * We have locked a valid entry.  We can borrow the ref
		 * for a mode 0 entry.  We can get a valid fp for a mode
		 * 2 entry but not borrow the ref.
		 */
		if (status == 0) {
			fp = fdc->fp;
			fdc->lru = ++td->td_fdcache_lru;
			atomic_swap_int(&fdc->locked, 2);

			return fp;
		}
		if (status == 2) {
			fp = fdc->fp;
			fhold(fp);
			fdc->lru = ++td->td_fdcache_lru;
			atomic_swap_int(&fdc->locked, 2);

			return fp;
		}
	}

	/*
	 * Lookup the descriptor the slow way.  This can contend against
	 * modifying operations in a multi-threaded environment and cause
	 * cache line ping ponging otherwise.
	 */
	fdp = td->td_proc->p_fd;
	spin_lock_shared(&fdp->fd_spin);

	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			fhold(fp);
			if (fdp->fd_files[fd].isfull == 0)
				goto enter;
		}
	} else {
		fp = NULL;
	}
	spin_unlock_shared(&fdp->fd_spin);

	return fp;

	/*
	 * We found a valid fp and held it, fdp is still shared locked.
	 * Enter the fp into the per-thread cache.  Find the oldest entry
	 * via lru, or an empty entry.
	 *
	 * Because fdp's spinlock is held (shared is fine), no other
	 * thread should be in the middle of clearing our selected entry.
	 */
enter:
	best = &td->td_fdcache[0];
	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
		if (fdc->fp == NULL) {
			best = fdc;
			break;
		}
		delta = fdc->lru - best->lru;
		if (delta < 0)
			best = fdc;
	}

	/*
	 * Don't enter into the cache if we cannot get the lock.
	 */
	status = atomic_swap_int(&best->locked, 1);
	if (status == 1)
		goto done;

	/*
	 * Clear the previous cache entry if present
	 */
	if (best->fp) {
		KKASSERT(best->fd >= 0);
		fclearcache(&fdp->fd_files[best->fd], best, status);
	}

	/*
	 * Create our new cache entry.  This entry is 'safe' until we tie
	 * into the fdnode.  If we cannot tie in, we will clear the entry.
	 */
	best->fd = fd;
	best->fp = fp;
	best->lru = ++td->td_fdcache_lru;
	best->locked = 2;			/* borrowed ref */

	fdn = &fdp->fd_files[fd];
	for (i = 0; i < NTDCACHEFD; ++i) {
		if (fdn->tdcache[i] == NULL &&
		    atomic_cmpset_ptr((void **)&fdn->tdcache[i], NULL, best)) {
			goto done;
		}
	}
	fdn->isfull = 1;			/* no space */
	best->fd = -1;
	best->fp = NULL;
	best->locked = 0;
done:
	spin_unlock_shared(&fdp->fd_spin);

	return fp;
}
/*
 * holdfp(), bypassing the cache in order to also be able to return
 * the descriptor flags.  A bit of a hack.
 */
static
struct file *
_holdfp2(thread_t td, int fd, char *fflagsp)
{
	struct filedesc *fdp;
	struct file *fp;

	/*
	 * Lookup the descriptor the slow way.  This can contend against
	 * modifying operations in a multi-threaded environment and cause
	 * cache line ping ponging otherwise.
	 */
	fdp = td->td_proc->p_fd;
	spin_lock_shared(&fdp->fd_spin);

	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			*fflagsp = fdp->fd_files[fd].fileflags;
			fhold(fp);
		}
	} else {
		fp = NULL;
	}
	spin_unlock_shared(&fdp->fd_spin);

	return fp;
}
/*
 * Drop the file pointer and return to the thread cache if possible.
 *
 * Caller must not hold fdp's spin lock.
 * td must be the current thread.
 */
void
dropfp(thread_t td, int fd, struct file *fp)
{
	struct filedesc *fdp;
	struct fdcache *fdc;
	int status;

	fdp = td->td_proc->p_fd;

	/*
	 * If our placeholder is still present we can re-cache the ref.
	 *
	 * Note that we can race an fclearcache().
	 */
	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
		if (fdc->fp != fp || fdc->fd != fd)
			continue;
		status = atomic_swap_int(&fdc->locked, 1);
		switch(status) {
		case 0:
			/*
			 * Not in mode 2, fdrop fp without caching.
			 */
			atomic_swap_int(&fdc->locked, 0);
			break;
		case 1:
			/*
			 * Not in mode 2, locked by someone else.
			 * fdrop fp without caching.
			 */
			break;
		case 2:
			/*
			 * Intact borrowed ref, return to mode 0
			 * indicating that we have returned the ref.
			 *
			 * Return the borrowed ref (2->1->0)
			 */
			if (fdc->fp == fp && fdc->fd == fd) {
				atomic_swap_int(&fdc->locked, 0);
				return;
			}
			atomic_swap_int(&fdc->locked, 2);
			break;
		}
	}

	/*
	 * Failed to re-cache, drop the fp without caching.
	 */
	fdrop(fp);
}
/*
 * Clear all descriptors cached in the per-thread fd cache for
 * the specified thread.
 *
 * Caller must not hold p_fd->spin.  This function will temporarily
 * obtain a shared spin lock.
 */
void
fexitcache(thread_t td)
{
	struct filedesc *fdp;
	struct fdcache *fdc;
	int status;
	int i;

	if (td->td_proc == NULL)
		return;
	fdp = td->td_proc->p_fd;
	if (fdp == NULL)
		return;

	/*
	 * A shared lock is sufficient as the caller controls td and we
	 * are only clearing td's cache.
	 */
	spin_lock_shared(&fdp->fd_spin);
	for (i = 0; i < NFDCACHE; ++i) {
		fdc = &td->td_fdcache[i];
		if (fdc->fp) {
			status = atomic_swap_int(&fdc->locked, 1);
			if (status == 1) {
				cpu_pause();
				--i;
				continue;
			}
			if (fdc->fp) {
				KKASSERT(fdc->fd >= 0);
				fclearcache(&fdp->fd_files[fdc->fd], fdc,
					    status);
			}
			atomic_swap_int(&fdc->locked, 0);
		}
	}
	spin_unlock_shared(&fdp->fd_spin);
}
static __inline struct filelist_head *
fp2filelist(const struct file *fp)
{
	u_int i;

	i = (u_int)(uintptr_t)fp % NFILELIST_HEADS;
	return &filelist_heads[i];
}
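/*
 * Informal note (editor's): hashing the file pointer value modulo a prime
 * bucket count (NFILELIST_HEADS == 257) spreads the global file list over
 * many spinlock-protected buckets even though kmalloc tends to hand back
 * strongly aligned addresses; a power-of-two bucket count would collide
 * heavily on the always-zero low address bits.
 */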
static __inline
struct plimit *
readplimits(struct proc *p)
{
	thread_t td = curthread;
	struct plimit *limit;

	limit = td->td_limit;
	if (limit != p->p_limit) {
		spin_lock_shared(&p->p_spin);
		limit = p->p_limit;
		atomic_add_int(&limit->p_refcnt, 1);
		spin_unlock_shared(&p->p_spin);
		if (td->td_limit)
			plimit_free(td->td_limit);
		td->td_limit = limit;
	}
	return limit;
}
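/*
 * Informal note (editor's): readplimits() caches the process's plimit
 * pointer in the current thread (td->td_limit) so repeated rlimit lookups
 * avoid re-taking p->p_spin and bouncing p_refcnt between cpus.  The
 * cached pointer is revalidated against p->p_limit on every call and only
 * refreshed (new ref taken, old ref freed) when the process's limits have
 * actually been replaced.
 */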
/*
 * System calls on descriptors.
 */
int
sys_getdtablesize(struct sysmsg *sysmsg, const struct getdtablesize_args *uap)
{
	struct proc *p = curproc;
	struct plimit *limit = readplimits(p);
	int dtsize;

	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		dtsize = INT_MAX;
	else
		dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (dtsize > maxfilesperproc)
		dtsize = maxfilesperproc;
	if (dtsize < minfilesperproc)
		dtsize = minfilesperproc;
	if (p->p_ucred->cr_uid && dtsize > maxfilesperuser)
		dtsize = maxfilesperuser;
	sysmsg->sysmsg_result = dtsize;

	return (0);
}
/*
 * Duplicate a file descriptor to a particular value.
 *
 * note: keep in mind that a potential race condition exists when closing
 * descriptors from a shared descriptor table (via rfork).
 */
int
sys_dup2(struct sysmsg *sysmsg, const struct dup2_args *uap)
{
	int error;
	int fd = 0;

	error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd);
	sysmsg->sysmsg_fds[0] = fd;

	return (error);
}
/*
 * Duplicate a file descriptor.
 */
int
sys_dup(struct sysmsg *sysmsg, const struct dup_args *uap)
{
	int error;
	int fd = 0;

	error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd);
	sysmsg->sysmsg_fds[0] = fd;

	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock for fp operations
 */
int
kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct vnode *vp;
	u_int newmin;
	u_int oflags;
	u_int nflags;
	int closedcounter;
	int tmp, error, flg = F_POSIX;

	KKASSERT(p);

	/*
	 * Operations on file descriptors that do not require a file pointer.
	 */
	switch (cmd) {
	case F_GETFD:
		error = fgetfdflags(p->p_fd, fd, &tmp);
		if (error == 0)
			dat->fc_cloexec = (tmp & UF_EXCLOSE) ? FD_CLOEXEC : 0;
		return (error);

	case F_SETFD:
		if (dat->fc_cloexec & FD_CLOEXEC)
			error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
		else
			error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
		return (error);
	case F_DUPFD:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_VARIABLE | DUP_FCNTL, fd, newmin,
				 &dat->fc_fd);
		return (error);
	case F_DUPFD_CLOEXEC:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_VARIABLE | DUP_CLOEXEC | DUP_FCNTL,
				 fd, newmin, &dat->fc_fd);
		return (error);
	case F_DUP2FD:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_FIXED, fd, newmin, &dat->fc_fd);
		return (error);
	case F_DUP2FD_CLOEXEC:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_FIXED | DUP_CLOEXEC, fd, newmin,
				 &dat->fc_fd);
		return (error);
	default:
		break;
	}

	/*
	 * Operations on file pointers
	 */
	closedcounter = p->p_fd->fd_closedcounter;
	if ((fp = holdfp(td, fd, -1)) == NULL)
		return (EBADF);

	switch (cmd) {
	case F_GETFL:
		dat->fc_flags = OFLAGS(fp->f_flag);
		error = 0;
		break;

	case F_SETFL:
		oflags = fp->f_flag;
		nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
		nflags |= oflags & ~FCNTLFLAGS;

		error = 0;
		if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY))
			error = EINVAL;
		if (error == 0 && ((nflags ^ oflags) & FASYNC)) {
			tmp = nflags & FASYNC;
			error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp,
					 cred, NULL);
		}

		/*
		 * If no error, must be atomically set.
		 */
		while (error == 0) {
			oflags = fp->f_flag;
			cpu_ccfence();
			nflags = (oflags & ~FCNTLFLAGS) | (nflags & FCNTLFLAGS);
			if (atomic_cmpset_int(&fp->f_flag, oflags, nflags))
				break;
			cpu_pause();
		}
		break;

	case F_GETOWN:
		error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner,
				 cred, NULL);
		break;

	case F_SETOWN:
		error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner,
				 cred, NULL);
		break;

	case F_SETLKW:
		flg |= F_WAIT;
		/* Fall into F_SETLK */
		/* fall through */
	case F_SETLK:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;

		/*
		 * copyin/lockop may block
		 */
		if (dat->fc_flock.l_whence == SEEK_CUR)
			dat->fc_flock.l_start += fp->f_offset;

		switch (dat->fc_flock.l_type) {
		case F_RDLCK:
			if ((fp->f_flag & FREAD) == 0) {
				error = EBADF;
				break;
			}
			if (p->p_leader->p_advlock_flag == 0)
				p->p_leader->p_advlock_flag = 1;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
					    &dat->fc_flock, flg);
			break;
		case F_WRLCK:
			if ((fp->f_flag & FWRITE) == 0) {
				error = EBADF;
				break;
			}
			if (p->p_leader->p_advlock_flag == 0)
				p->p_leader->p_advlock_flag = 1;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
					    &dat->fc_flock, flg);
			break;
		case F_UNLCK:
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
					    &dat->fc_flock, F_POSIX);
			break;
		default:
			error = EINVAL;
			break;
		}

		/*
		 * It is possible to race a close() on the descriptor while
		 * we were blocked getting the lock.  If this occurs the
		 * close might not have caught the lock.
		 */
		if (checkfdclosed(td, p->p_fd, fd, fp, closedcounter)) {
			dat->fc_flock.l_whence = SEEK_SET;
			dat->fc_flock.l_start = 0;
			dat->fc_flock.l_len = 0;
			dat->fc_flock.l_type = F_UNLCK;
			VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
				    F_UNLCK, &dat->fc_flock, F_POSIX);
		}
		break;

	case F_GETLK:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;

		/*
		 * copyin/lockop may block
		 */
		if (dat->fc_flock.l_type != F_RDLCK &&
		    dat->fc_flock.l_type != F_WRLCK &&
		    dat->fc_flock.l_type != F_UNLCK) {
			error = EINVAL;
			break;
		}
		if (dat->fc_flock.l_whence == SEEK_CUR)
			dat->fc_flock.l_start += fp->f_offset;
		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
				    &dat->fc_flock, F_POSIX);
		break;

	case F_GETPATH:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			break;
		}

		/*
		 * cache_fullpath() itself is limited to MAXPATHLEN so we
		 * do not need an explicit length check, but we do have
		 * to munge the error to ERANGE as per fcntl.2
		 */
		error = cache_fullpath(p, &fp->f_nchandle, NULL,
				       &dat->fc_path.ptr, &dat->fc_path.buf, 1);
		if (error)
			error = ERANGE;
		break;

	default:
		error = EINVAL;
		break;
	}

	fdrop(fp);
	return (error);
}
/*
 * The file control system call.
 */
int
sys_fcntl(struct sysmsg *sysmsg, const struct fcntl_args *uap)
{
	union fcntl_dat dat;
	int error;

	switch (uap->cmd) {
	case F_DUPFD:
	case F_DUP2FD:
	case F_DUPFD_CLOEXEC:
	case F_DUP2FD_CLOEXEC:
		dat.fc_fd = uap->arg;
		break;
	case F_SETFD:
		dat.fc_cloexec = uap->arg;
		break;
	case F_SETFL:
		dat.fc_flags = uap->arg;
		break;
	case F_SETOWN:
		dat.fc_owner = uap->arg;
		break;
	case F_SETLKW:
	case F_SETLK:
	case F_GETLK:
		error = copyin((caddr_t)uap->arg, &dat.fc_flock,
			       sizeof(struct flock));
		if (error)
			return (error);
		break;
	}

	error = kern_fcntl(uap->fd, uap->cmd, &dat, curthread->td_ucred);

	if (error == 0) {
		switch (uap->cmd) {
		case F_DUPFD:
		case F_DUP2FD:
		case F_DUPFD_CLOEXEC:
		case F_DUP2FD_CLOEXEC:
			sysmsg->sysmsg_result = dat.fc_fd;
			break;
		case F_GETFD:
			sysmsg->sysmsg_result = dat.fc_cloexec;
			break;
		case F_GETFL:
			sysmsg->sysmsg_result = dat.fc_flags;
			break;
		case F_GETOWN:
			sysmsg->sysmsg_result = dat.fc_owner;
			break;
		case F_GETLK:
			error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
					sizeof(struct flock));
			break;
		case F_GETPATH:
			error = copyout(dat.fc_path.ptr, (caddr_t)uap->arg,
					strlen(dat.fc_path.ptr) + 1);
			kfree(dat.fc_path.buf, M_TEMP);
			break;
		}
	}

	return (error);
}
/*
 * Common code for dup, dup2, and fcntl(F_DUPFD).
 *
 * There are four type flags: DUP_FCNTL, DUP_FIXED, DUP_VARIABLE, and
 * DUP_CLOEXEC.
 *
 * DUP_FCNTL is for handling EINVAL vs. EBADF differences between
 * fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC and dup2() (per POSIX).
 * The next two flags are mutually exclusive, and the fourth is optional.
 * DUP_FIXED tells kern_dup() to destructively dup over an existing file
 * descriptor if "new" is already open.  DUP_VARIABLE tells kern_dup()
 * to find the lowest unused file descriptor that is greater than or
 * equal to "new".  DUP_CLOEXEC, which works with either of the first
 * two flags, sets the close-on-exec flag on the "new" file descriptor.
 */
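/*
 * Informal mapping of the callers above to these flags (editor's summary,
 * taken from sys_dup(), sys_dup2() and kern_fcntl() in this file):
 *
 *	dup(fd)			kern_dup(DUP_VARIABLE, fd, 0, ...)
 *	dup2(from, to)		kern_dup(DUP_FIXED, from, to, ...)
 *	fcntl(F_DUPFD)		kern_dup(DUP_VARIABLE | DUP_FCNTL, ...)
 *	fcntl(F_DUPFD_CLOEXEC)	kern_dup(DUP_VARIABLE | DUP_CLOEXEC |
 *					 DUP_FCNTL, ...)
 *	fcntl(F_DUP2FD)		kern_dup(DUP_FIXED, ...)
 *	fcntl(F_DUP2FD_CLOEXEC)	kern_dup(DUP_FIXED | DUP_CLOEXEC, ...)
 */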
int
kern_dup(int flags, int old, int new, int *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct plimit *limit = readplimits(p);
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct file *delfp;
	int oldflags;
	int holdleaders;
	int dtsize;
	int error, newfd;

	/*
	 * Verify that we have a valid descriptor to dup from and
	 * possibly to dup to.  When the new descriptor is out of
	 * bounds, fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC must
	 * return EINVAL, while dup2() returns EBADF in
	 * this case.
	 *
	 * NOTE: maxfilesperuser is not applicable to dup()
	 */
retry:
	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		dtsize = INT_MAX;
	else
		dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
	if (dtsize > maxfilesperproc)
		dtsize = maxfilesperproc;
	if (dtsize < minfilesperproc)
		dtsize = minfilesperproc;

	if (new < 0 || new >= dtsize)
		return (flags & DUP_FCNTL ? EINVAL : EBADF);

	spin_lock(&fdp->fd_spin);
	if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
		spin_unlock(&fdp->fd_spin);
		return (EBADF);
	}
	if ((flags & DUP_FIXED) && old == new) {
		*res = new;
		if (flags & DUP_CLOEXEC)
			fdp->fd_files[new].fileflags |= UF_EXCLOSE;
		spin_unlock(&fdp->fd_spin);
		return (0);
	}
	fp = fdp->fd_files[old].fp;
	oldflags = fdp->fd_files[old].fileflags;
	fhold(fp);

	/*
	 * Allocate a new descriptor if DUP_VARIABLE, or expand the table
	 * if the requested descriptor is beyond the current table size.
	 *
	 * This can block.  Retry if the source descriptor no longer matches
	 * or if our expectation in the expansion case races.
	 *
	 * If we are not expanding or allocating a new descriptor, then reset
	 * the target descriptor to a reserved state so we have a uniform
	 * setup for the next code block.
	 */
	if ((flags & DUP_VARIABLE) || new >= fdp->fd_nfiles) {
		error = fdalloc_locked(p, fdp, new, &newfd);
		if (error) {
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			return (error);
		}
		/*
		 * Check for ripout
		 */
		if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		/*
		 * Check for expansion race
		 */
		if ((flags & DUP_VARIABLE) == 0 && new != newfd) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		/*
		 * Check for ripout, newfd reused old (this case probably
		 * can't occur).
		 */
		if (old == newfd) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		new = newfd;
		delfp = NULL;
	} else {
		if (fdp->fd_files[new].reserved) {
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			kprintf("Warning: dup(): target descriptor %d is "
				"reserved, waiting for it to be resolved\n",
				new);
			tsleep(fdp, 0, "fdres", hz);
			goto retry;
		}

		/*
		 * If the target descriptor was never allocated we have
		 * to allocate it.  If it was we have to clean out the
		 * old descriptor.  delfp inherits the ref from the
		 * descriptor table.
		 */
		++fdp->fd_closedcounter;
		fclearcache(&fdp->fd_files[new], NULL, 0);
		++fdp->fd_closedcounter;
		delfp = fdp->fd_files[new].fp;
		fdp->fd_files[new].fp = NULL;
		fdp->fd_files[new].reserved = 1;
		if (delfp == NULL) {
			fdreserve_locked(fdp, new, 1);
			if (new > fdp->fd_lastfile)
				fdp->fd_lastfile = new;
		}
	}

	/*
	 * NOTE: still holding an exclusive spinlock
	 */

	/*
	 * If a descriptor is being overwritten we may have to tell
	 * fdfree() to sleep to ensure that all relevant process
	 * leaders can be traversed in closef().
	 */
	if (delfp != NULL && p->p_fdtol != NULL) {
		fdp->fd_holdleaderscount++;
		holdleaders = 1;
	} else {
		holdleaders = 0;
	}
	KASSERT(delfp == NULL || (flags & DUP_FIXED),
		("dup() picked an open file"));

	/*
	 * Duplicate the source descriptor, update lastfile.  If the new
	 * descriptor was not allocated and we aren't replacing an existing
	 * descriptor we have to mark the descriptor as being in use.
	 *
	 * The fd_files[] array inherits fp's hold reference.
	 */
	fsetfd_locked(fdp, fp, new);
	if ((flags & DUP_CLOEXEC) != 0)
		fdp->fd_files[new].fileflags = oldflags | UF_EXCLOSE;
	else
		fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
	spin_unlock(&fdp->fd_spin);
	*res = new;

	/*
	 * If we dup'd over a valid file, we now own the reference to it
	 * and must dispose of it using closef() semantics (as if a
	 * close() were performed on it).
	 */
	if (delfp) {
		if (SLIST_FIRST(&delfp->f_klist))
			knote_fdclose(delfp, fdp, new);
		closef(delfp, p);
		if (holdleaders) {
			spin_lock(&fdp->fd_spin);
			fdp->fd_holdleaderscount--;
			if (fdp->fd_holdleaderscount == 0 &&
			    fdp->fd_holdleaderswakeup != 0) {
				fdp->fd_holdleaderswakeup = 0;
				spin_unlock(&fdp->fd_spin);
				wakeup(&fdp->fd_holdleaderscount);
			} else {
				spin_unlock(&fdp->fd_spin);
			}
		}
	}
	return (0);
}
/*
 * If sigio is on the list associated with a process or process group,
 * disable signalling from the device, remove sigio from the list and
 * free sigio.
 */
void
funsetown(struct sigio **sigiop)
{
	struct pgrp *pgrp;
	struct proc *p;
	struct sigio *sigio;

	if ((sigio = *sigiop) != NULL) {
		lwkt_gettoken(&sigio_token);	/* protect sigio */
		KKASSERT(sigiop == sigio->sio_myref);
		sigio = *sigiop;
		*sigiop = NULL;
		lwkt_reltoken(&sigio_token);
	}
	if (sigio == NULL)
		return;

	if (sigio->sio_pgid < 0) {
		pgrp = sigio->sio_pgrp;
		sigio->sio_pgrp = NULL;
		lwkt_gettoken(&pgrp->pg_token);
		SLIST_REMOVE(&pgrp->pg_sigiolst, sigio, sigio, sio_pgsigio);
		lwkt_reltoken(&pgrp->pg_token);
		pgrel(pgrp);
	} else /* if ((*sigiop)->sio_pgid > 0) */ {
		p = sigio->sio_proc;
		sigio->sio_proc = NULL;
		PHOLD(p);
		lwkt_gettoken(&p->p_token);
		SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio);
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	crfree(sigio->sio_ucred);
	sigio->sio_ucred = NULL;
	kfree(sigio, M_SIGIO);
}
/*
 * Free a list of sigio structures.  Caller is responsible for ensuring
 * that the list is MPSAFE.
 */
void
funsetownlst(struct sigiolst *sigiolst)
{
	struct sigio *sigio;

	while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
		funsetown(sigio->sio_myref);
}
/*
 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
 *
 * After permission checking, add a sigio structure to the sigio list for
 * the process or process group.
 */
int
fsetown(pid_t pgid, struct sigio **sigiop)
{
	struct proc *proc = NULL;
	struct pgrp *pgrp = NULL;
	struct sigio *sigio;
	int error;

	if (pgid == 0) {
		funsetown(sigiop);
		return (0);
	}

	if (pgid > 0) {
		proc = pfind(pgid);
		if (proc == NULL) {
			error = ESRCH;
			goto done;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (proc->p_session != curproc->p_session) {
			error = EPERM;
			goto done;
		}
	} else /* if (pgid < 0) */ {
		pgrp = pgfind(-pgid);
		if (pgrp == NULL) {
			error = ESRCH;
			goto done;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pgrp->pg_session != curproc->p_session) {
			error = EPERM;
			goto done;
		}
	}
	sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK | M_ZERO);
	if (pgid > 0) {
		KKASSERT(pgrp == NULL);
		lwkt_gettoken(&proc->p_token);
		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
		sigio->sio_proc = proc;
		lwkt_reltoken(&proc->p_token);
	} else {
		KKASSERT(proc == NULL);
		lwkt_gettoken(&pgrp->pg_token);
		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
		sigio->sio_pgrp = pgrp;
		lwkt_reltoken(&pgrp->pg_token);
	}
	sigio->sio_pgid = pgid;
	sigio->sio_ucred = crhold(curthread->td_ucred);
	/* It would be convenient if p_ruid was in ucred. */
	sigio->sio_ruid = sigio->sio_ucred->cr_ruid;
	sigio->sio_myref = sigiop;

	lwkt_gettoken(&sigio_token);
	funsetown(sigiop);
	*sigiop = sigio;
	lwkt_reltoken(&sigio_token);
	error = 0;
done:
	if (pgrp)
		pgrel(pgrp);
	if (proc)
		PRELE(proc);
	return (error);
}
/*
 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
 */
pid_t
fgetown(struct sigio **sigiop)
{
	struct sigio *sigio;
	pid_t own;

	lwkt_gettoken_shared(&sigio_token);
	sigio = *sigiop;
	own = (sigio != NULL ? sigio->sio_pgid : 0);
	lwkt_reltoken(&sigio_token);

	return (own);
}
/*
 * Close many file descriptors.
 */
int
sys_closefrom(struct sysmsg *sysmsg, const struct closefrom_args *uap)
{
	return(kern_closefrom(uap->fd));
}
/*
 * Close all file descriptors greater than or equal to fd
 */
int
kern_closefrom(int fd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	int error;
	int e2;

	KKASSERT(p);
	fdp = p->p_fd;

	if (fd < 0)
		return (EINVAL);

	/*
	 * NOTE: This function will skip unassociated descriptors and
	 *	 reserved descriptors that have not yet been assigned.
	 *	 fd_lastfile can change as a side effect of kern_close().
	 *
	 * NOTE: We accumulate EINTR errors and return EINTR if any
	 *	 close() returned EINTR.  However, the descriptor is
	 *	 still closed and we do not break out of the loop.
	 */
	error = 0;
	spin_lock(&fdp->fd_spin);
	while (fd <= fdp->fd_lastfile) {
		if (fdp->fd_files[fd].fp != NULL) {
			spin_unlock(&fdp->fd_spin);
			/* ok if this races another close */
			e2 = kern_close(fd);
			if (e2 == EINTR)
				error = EINTR;
			spin_lock(&fdp->fd_spin);
		}
		++fd;
	}
	spin_unlock(&fdp->fd_spin);

	return (error);
}
/*
 * Close a file descriptor.
 */
int
sys_close(struct sysmsg *sysmsg, const struct close_args *uap)
{
	return(kern_close(uap->fd));
}
int
kern_close(int fd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	struct file *fp;
	int error;
	int holdleaders;

	KKASSERT(p);
	fdp = p->p_fd;

	/*
	 * funsetfd*() also clears the fd cache
	 */
	spin_lock(&fdp->fd_spin);
	if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
		spin_unlock(&fdp->fd_spin);
		return (EBADF);
	}
	holdleaders = 0;
	if (p->p_fdtol != NULL) {
		/*
		 * Ask fdfree() to sleep to ensure that all relevant
		 * process leaders can be traversed in closef().
		 */
		fdp->fd_holdleaderscount++;
		holdleaders = 1;
	}

	/*
	 * we now hold the fp reference that used to be owned by the descriptor
	 * array.
	 */
	spin_unlock(&fdp->fd_spin);
	if (SLIST_FIRST(&fp->f_klist))
		knote_fdclose(fp, fdp, fd);
	error = closef(fp, p);
	if (holdleaders) {
		spin_lock(&fdp->fd_spin);
		fdp->fd_holdleaderscount--;
		if (fdp->fd_holdleaderscount == 0 &&
		    fdp->fd_holdleaderswakeup != 0) {
			fdp->fd_holdleaderswakeup = 0;
			spin_unlock(&fdp->fd_spin);
			wakeup(&fdp->fd_holdleaderscount);
		} else {
			spin_unlock(&fdp->fd_spin);
		}
	}
	return (error);
}
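/*
 * Informal note (editor's): fd_holdleaderscount/fd_holdleaderswakeup act
 * as a small barrier between close paths and fdfree().  While any close
 * of a shared descriptor table is still inside closef(), fdfree() sleeps
 * (see the "fdlhold" waits below) so that the filedesc_to_leader list it
 * must traverse cannot be torn down underneath the in-progress close.
 */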
/*
 * shutdown_args(int fd, int how)
 */
int
kern_shutdown(int fd, int how)
{
	struct thread *td = curthread;
	struct file *fp;
	int error;

	if ((fp = holdfp(td, fd, -1)) == NULL)
		return (EBADF);
	error = fo_shutdown(fp, how);
	fdrop(fp);

	return (error);
}
int
sys_shutdown(struct sysmsg *sysmsg, const struct shutdown_args *uap)
{
	int error;

	error = kern_shutdown(uap->s, uap->how);

	return (error);
}
int
kern_fstat(int fd, struct stat *ub)
{
	struct thread *td = curthread;
	struct file *fp;
	int error;

	if ((fp = holdfp(td, fd, -1)) == NULL)
		return (EBADF);
	error = fo_stat(fp, ub, td->td_ucred);
	fdrop(fp);

	return (error);
}
/*
 * Return status information about a file descriptor.
 */
int
sys_fstat(struct sysmsg *sysmsg, const struct fstat_args *uap)
{
	struct stat st;
	int error;

	error = kern_fstat(uap->fd, &st);

	if (error == 0)
		error = copyout(&st, uap->sb, sizeof(st));
	return (error);
}
/*
 * Return pathconf information about a file descriptor.
 */
int
sys_fpathconf(struct sysmsg *sysmsg, const struct fpathconf_args *uap)
{
	struct thread *td = curthread;
	struct file *fp;
	struct vnode *vp;
	int error = 0;

	if ((fp = holdfp(td, uap->fd, -1)) == NULL)
		return (EBADF);

	switch (fp->f_type) {
	case DTYPE_PIPE:
	case DTYPE_SOCKET:
		if (uap->name != _PC_PIPE_BUF) {
			error = EINVAL;
		} else {
			sysmsg->sysmsg_result = PIPE_BUF;
			error = 0;
		}
		break;
	case DTYPE_FIFO:
	case DTYPE_VNODE:
		vp = (struct vnode *)fp->f_data;
		error = VOP_PATHCONF(vp, uap->name, &sysmsg->sysmsg_reg);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	fdrop(fp);
	return (error);
}
/*
 * Grow the file table so it can hold through descriptor (want).
 *
 * The fdp's spinlock must be held exclusively on entry and may be held
 * exclusively on return.  The spinlock may be cycled by the routine.
 */
static void
fdgrow_locked(struct filedesc *fdp, int want)
{
	struct fdnode *newfiles;
	struct fdnode *oldfiles;
	int nf, extra;

	nf = fdp->fd_nfiles;
	do {
		/* nf has to be of the form 2^n - 1 */
		nf = 2 * nf + 1;
	} while (nf <= want);

	spin_unlock(&fdp->fd_spin);
	newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
	spin_lock(&fdp->fd_spin);

	/*
	 * We could have raced another extend while we were not holding
	 * the spinlock.
	 */
	if (fdp->fd_nfiles >= nf) {
		spin_unlock(&fdp->fd_spin);
		kfree(newfiles, M_FILEDESC);
		spin_lock(&fdp->fd_spin);
		return;
	}

	/*
	 * Copy the existing ofile and ofileflags arrays
	 * and zero the new portion of each array.
	 */
	extra = nf - fdp->fd_nfiles;
	bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
	bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));

	oldfiles = fdp->fd_files;
	fdp->fd_files = newfiles;
	fdp->fd_nfiles = nf;

	if (oldfiles != fdp->fd_builtin_files) {
		spin_unlock(&fdp->fd_spin);
		kfree(oldfiles, M_FILEDESC);
		spin_lock(&fdp->fd_spin);
	}
}
/*
 * Number of nodes in right subtree, including the root.
 */
static __inline int
right_subtree_size(int n)
{
	return (n ^ (n | (n + 1)));
}

/*
 * Bigger ancestor, if it exists.
 */
static __inline int
right_ancestor(int n)
{
	return (n | (n + 1));
}

/*
 * Smaller ancestor.
 */
static __inline int
left_ancestor(int n)
{
	return ((n & (n + 1)) - 1);
}
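/*
 * Informal worked example of the in-place binary tree arithmetic
 * (editor's note), using fd 5 (binary 101):
 *
 *	right_subtree_size(5) = 5 ^ (5 | 6) = 101b ^ 111b = 2
 *	right_ancestor(5)     = 5 | 6       = 111b        = 7
 *	left_ancestor(5)      = (5 & 6) - 1 = 100b - 1    = 3
 *
 * i.e. node 5 roots a right subtree of two nodes, its next ancestor to
 * the right is node 7, and its next ancestor to the left is node 3.
 */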
/*
 * Traverse the in-place binary tree bottom-up adjusting the allocation
 * count so scans can determine where free descriptors are located.
 *
 * caller must be holding an exclusive spinlock on fdp
 */
static void
fdreserve_locked(struct filedesc *fdp, int fd, int incr)
{
	while (fd >= 0) {
		fdp->fd_files[fd].allocated += incr;
		KKASSERT(fdp->fd_files[fd].allocated >= 0);
		fd = left_ancestor(fd);
	}
}
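/*
 * Informal example (editor's note): reserving fd 5 via
 * fdreserve_locked(fdp, 5, 1) bumps .allocated at node 5, then walks
 * left_ancestor(): 5 -> 3 -> -1, so nodes 5 and 3 each gain one
 * allocation.  A later scan seeing files[3].allocated can therefore tell
 * how full the subtree rooted at node 3 is without touching every leaf.
 */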
/*
 * Reserve a file descriptor for the process.  If no error occurs, the
 * caller MUST at some point call fsetfd() or assign a file pointer
 * or dispose of the reservation.
 */
static int
fdalloc_locked(struct proc *p, struct filedesc *fdp, int want, int *result)
{
	struct plimit *limit = readplimits(p);
	struct uidinfo *uip;
	int fd, rsize, rsum, node, lim;

	/*
	 * Check dtable size limit
	 */
	*result = -1;	/* avoid gcc warnings */
	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		lim = INT_MAX;
	else
		lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (lim > maxfilesperproc)
		lim = maxfilesperproc;
	if (lim < minfilesperproc)
		lim = minfilesperproc;
	if (want >= lim)
		return (EINVAL);

	/*
	 * Check that the user has not run out of descriptors (non-root only).
	 * As a safety measure the dtable is allowed to have at least
	 * minfilesperproc open fds regardless of the maxfilesperuser limit.
	 *
	 * This isn't as loose a spec as ui_posixlocks, so we use atomic
	 * ops to force synchronize and recheck if we would otherwise
	 * blow the limit.
	 */
	if (p->p_ucred->cr_uid && fdp->fd_nfiles >= minfilesperproc) {
		uip = p->p_ucred->cr_uidinfo;
		if (uip->ui_openfiles > maxfilesperuser) {
			int n;
			int count;

			count = 0;
			for (n = 0; n < ncpus; ++n) {
				count += atomic_swap_int(
					    &uip->ui_pcpu[n].pu_openfiles, 0);
			}
			atomic_add_int(&uip->ui_openfiles, count);
			if (uip->ui_openfiles > maxfilesperuser) {
				krateprintf(&krate_uidinfo,
					    "Warning: user %d pid %d (%s) "
					    "ran out of file descriptors "
					    "(%d/%d)\n",
					    p->p_ucred->cr_uid, (int)p->p_pid,
					    p->p_comm,
					    uip->ui_openfiles,
					    maxfilesperuser);
				return (ENFILE);
			}
		}
	}

	/*
	 * Grow the dtable if necessary
	 */
	if (want >= fdp->fd_nfiles)
		fdgrow_locked(fdp, want);

	/*
	 * Search for a free descriptor starting at the higher
	 * of want or fd_freefile.  If that fails, consider
	 * expanding the ofile array.
	 *
	 * NOTE! the 'allocated' field is a cumulative recursive allocation
	 * count.  If we happen to see a value of 0 then we can shortcut
	 * our search.  Otherwise we run through the tree going
	 * down branches we know have free descriptor(s) until we hit a
	 * leaf node.  The leaf node will be free but will not necessarily
	 * have an allocated field of 0.
	 */
retry:
	/* move up the tree looking for a subtree with a free node */
	for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
	     fd = right_ancestor(fd)) {
		if (fdp->fd_files[fd].allocated == 0)
			goto found;

		rsize = right_subtree_size(fd);
		if (fdp->fd_files[fd].allocated == rsize)
			continue;	/* right subtree full */

		/*
		 * Free fd is in the right subtree of the tree rooted at fd.
		 * Call that subtree R.  Look for the smallest (leftmost)
		 * subtree of R with an unallocated fd: continue moving
		 * down the left branch until encountering a full left
		 * subtree, then move to the right.
		 */
		for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
			node = fd + rsize;
			rsum += fdp->fd_files[node].allocated;
			if (fdp->fd_files[fd].allocated == rsum + rsize) {
				fd = node;	/* move to the right */
				if (fdp->fd_files[node].allocated == 0)
					goto found;
				rsum = 0;
			}
		}
		goto found;
	}

	/*
	 * No space in current array.  Expand?
	 */
	if (fdp->fd_nfiles >= lim)
		return (EMFILE);
	fdgrow_locked(fdp, want);
	goto retry;

found:
	KKASSERT(fd < fdp->fd_nfiles);
	if (fd > fdp->fd_lastfile)
		fdp->fd_lastfile = fd;
	if (want <= fdp->fd_freefile)
		fdp->fd_freefile = fd;
	*result = fd;
	KKASSERT(fdp->fd_files[fd].fp == NULL);
	KKASSERT(fdp->fd_files[fd].reserved == 0);
	fdp->fd_files[fd].fileflags = 0;
	fdp->fd_files[fd].reserved = 1;
	fdreserve_locked(fdp, fd, 1);
	return (0);
}
int
fdalloc(struct proc *p, int want, int *result)
{
	struct filedesc *fdp = p->p_fd;
	int error;

	spin_lock(&fdp->fd_spin);
	error = fdalloc_locked(p, fdp, want, result);
	spin_unlock(&fdp->fd_spin);

	return error;
}
/*
 * Check to see whether n user file descriptors
 * are available to the process p.
 */
int
fdavail(struct proc *p, int n)
{
	struct plimit *limit = readplimits(p);
	struct filedesc *fdp = p->p_fd;
	struct fdnode *fdnode;
	int i, lim, last;

	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		lim = INT_MAX;
	else
		lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (lim > maxfilesperproc)
		lim = maxfilesperproc;
	if (lim < minfilesperproc)
		lim = minfilesperproc;

	spin_lock(&fdp->fd_spin);
	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
		spin_unlock(&fdp->fd_spin);
		return (1);
	}
	last = min(fdp->fd_nfiles, lim);
	fdnode = &fdp->fd_files[fdp->fd_freefile];
	for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) {
		if (fdnode->fp == NULL && --n <= 0) {
			spin_unlock(&fdp->fd_spin);
			return (1);
		}
	}
	spin_unlock(&fdp->fd_spin);
	return (0);
}
/*
 * Revoke open descriptors referencing (f_data, f_type)
 *
 * Any revoke executed within a prison is only able to
 * revoke descriptors for processes within that prison.
 *
 * Returns 0 on success or an error code.
 */
struct fdrevoke_info {
	void *data;
	short type;
	short unused;
	int found;
	struct ucred *cred;
	struct file *nfp;
};

static int fdrevoke_check_callback(struct file *fp, void *vinfo);
static int fdrevoke_proc_callback(struct proc *p, void *vinfo);

int
fdrevoke(void *f_data, short f_type, struct ucred *cred)
{
	struct fdrevoke_info info;
	int error;

	bzero(&info, sizeof(info));
	info.data = f_data;
	info.type = f_type;
	info.cred = cred;
	error = falloc(NULL, &info.nfp, NULL);
	if (error)
		return (error);

	/*
	 * Scan the file pointer table once.  dups do not dup file pointers,
	 * only descriptors, so there is no leak.  Set FREVOKED on the fps
	 * found.
	 *
	 * Any fps sent over unix-domain sockets will be revoked by the
	 * socket code checking for FREVOKED when the fps are externalized.
	 * revoke_token is used to make sure that fps marked FREVOKED and
	 * externalized will be picked up by the following allproc_scan().
	 */
	lwkt_gettoken(&revoke_token);
	allfiles_scan_exclusive(fdrevoke_check_callback, &info);
	lwkt_reltoken(&revoke_token);

	/*
	 * If any fps were marked track down the related descriptors
	 * and close them.  Any dup()s at this point will notice
	 * the FREVOKED already set in the fp and do the right thing.
	 */
	if (info.found)
		allproc_scan(fdrevoke_proc_callback, &info, 0);
	fdrop(info.nfp);
	return (0);
}
/*
 * Locate matching file pointers directly.
 *
 * WARNING: allfiles_scan_exclusive() holds a spinlock through these calls!
 */
static int
fdrevoke_check_callback(struct file *fp, void *vinfo)
{
	struct fdrevoke_info *info = vinfo;

	/*
	 * File pointers already flagged for revocation are skipped.
	 */
	if (fp->f_flag & FREVOKED)
		return(0);

	/*
	 * When revoking from within a prison, file pointers created
	 * outside of that prison, or file pointers without creds,
	 * cannot be revoked.
	 */
	if (info->cred->cr_prison &&
	    (fp->f_cred == NULL ||
	     info->cred->cr_prison != fp->f_cred->cr_prison)) {
		return(0);
	}

	/*
	 * If the file pointer matches then mark it for revocation.  The
	 * flag is currently only used by unp_revoke_gc().
	 *
	 * info->found is a heuristic and can race in a SMP environment.
	 */
	if (info->data == fp->f_data && info->type == fp->f_type) {
		atomic_set_int(&fp->f_flag, FREVOKED);
		info->found = 1;
	}
	return(0);
}
/*
 * Locate matching file pointers via process descriptor tables.
 */
static int
fdrevoke_proc_callback(struct proc *p, void *vinfo)
{
	struct fdrevoke_info *info = vinfo;
	struct filedesc *fdp;
	struct file *fp;
	int n;

	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return(0);
	if (info->cred->cr_prison &&
	    info->cred->cr_prison != p->p_ucred->cr_prison) {
		return(0);
	}

	/*
	 * If the controlling terminal of the process matches the
	 * vnode being revoked we clear the controlling terminal.
	 *
	 * The normal spec_close() may not catch this because it
	 * uses curproc instead of p.
	 */
	if (p->p_session && info->type == DTYPE_VNODE &&
	    info->data == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(info->data);
	}

	/*
	 * Softref the fdp to prevent it from being destroyed
	 */
	spin_lock(&p->p_spin);
	if ((fdp = p->p_fd) == NULL) {
		spin_unlock(&p->p_spin);
		return(0);
	}
	atomic_add_int(&fdp->fd_softrefs, 1);
	spin_unlock(&p->p_spin);

	/*
	 * Locate and close any matching file descriptors, replacing
	 * them with info->nfp.
	 */
	spin_lock(&fdp->fd_spin);
	for (n = 0; n < fdp->fd_nfiles; ++n) {
		if ((fp = fdp->fd_files[n].fp) == NULL)
			continue;
		if (fp->f_flag & FREVOKED) {
			++fdp->fd_closedcounter;
			fclearcache(&fdp->fd_files[n], NULL, 0);
			++fdp->fd_closedcounter;
			fhold(info->nfp);
			fdp->fd_files[n].fp = info->nfp;
			spin_unlock(&fdp->fd_spin);
			knote_fdclose(fp, fdp, n);	/* XXX */
			closef(fp, p);
			spin_lock(&fdp->fd_spin);
		}
	}
	spin_unlock(&fdp->fd_spin);
	atomic_subtract_int(&fdp->fd_softrefs, 1);
	return(0);
}
/*
 * Create a new open file structure and reserve a file descriptor
 * for the process that refers to it.
 *
 * Root creds are checked using lp, or assumed if lp is NULL.  If
 * resultfd is non-NULL then lp must also be non-NULL.  No file
 * descriptor is reserved (and no process context is needed) if
 * resultfd is NULL.
 *
 * A file pointer with a refcount of 1 is returned.  Note that the
 * file pointer is NOT associated with the descriptor.  If falloc
 * returns success, fsetfd() MUST be called to either associate the
 * file pointer or clear the reservation.
 */
int
falloc(struct lwp *lp, struct file **resultfp, int *resultfd)
{
	static struct timeval lastfail;
	static int curfail;
	struct filelist_head *head;
	struct file *fp;
	struct ucred *cred = lp ? lp->lwp_thread->td_ucred : proc0.p_ucred;
	int error;

	fp = NULL;

	/*
	 * Handle filetable full issues and root overfill.
	 */
	if (nfiles >= maxfiles - maxfilesrootres &&
	    (cred->cr_ruid != 0 || nfiles >= maxfiles)) {
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			kprintf("kern.maxfiles limit exceeded by uid %d, "
				"please see tuning(7).\n",
				cred->cr_ruid);
		}
		error = ENFILE;
		goto done;
	}

	/*
	 * Allocate a new file descriptor.
	 */
	fp = kmalloc_obj(sizeof(*fp), M_FILE, M_WAITOK|M_ZERO);
	spin_init(&fp->f_spin, "falloc");
	SLIST_INIT(&fp->f_klist);
	fp->f_count = 1;
	fp->f_ops = &badfileops;
	fsetcred(fp, cred);
	atomic_add_int(&nfiles, 1);

	head = fp2filelist(fp);
	spin_lock(&head->spin);
	LIST_INSERT_HEAD(&head->list, fp, f_list);
	spin_unlock(&head->spin);

	if (resultfd) {
		if ((error = fdalloc(lp->lwp_proc, 0, resultfd)) != 0) {
			fdrop(fp);
			fp = NULL;
		}
	} else {
		error = 0;
	}
done:
	*resultfp = fp;
	return (error);
}
/*
 * Check for races against a file descriptor by determining that the
 * file pointer is still associated with the specified file descriptor,
 * and a close is not currently in progress.
 */
int
checkfdclosed(thread_t td, struct filedesc *fdp, int fd, struct file *fp,
	      int closedcounter)
{
	struct fdcache *fdc;
	int error;

	cpu_lfence();
	if (fdp->fd_closedcounter == closedcounter)
		return 0;

	if (td->td_proc && td->td_proc->p_fd == fdp) {
		for (fdc = &td->td_fdcache[0];
		     fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
			if (fdc->fd == fd && fdc->fp == fp)
				return 0;
		}
	}

	spin_lock_shared(&fdp->fd_spin);
	if ((unsigned)fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp)
		error = EBADF;
	else
		error = 0;
	spin_unlock_shared(&fdp->fd_spin);
	return (error);
}
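/*
 * Informal note (editor's): fd_closedcounter is bumped around every
 * descriptor teardown (see funsetfd_locked(), kern_dup() and
 * fdrevoke_proc_callback()), so a caller that sampled it before blocking
 * can use the cheap equality test above as a generation check: an
 * unchanged counter proves no close ran in the interim and the table
 * lookup can be skipped entirely.
 */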
/*
 * Associate a file pointer with a previously reserved file descriptor.
 * This function always succeeds.
 *
 * If fp is NULL, the file descriptor is returned to the pool.
 *
 * Caller must hold an exclusive spinlock on fdp->fd_spin.
 */
static void
fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd)
{
	KKASSERT((unsigned)fd < fdp->fd_nfiles);
	KKASSERT(fdp->fd_files[fd].reserved != 0);
	if (fp) {
		fhold(fp);
		/* fclearcache(&fdp->fd_files[fd], NULL, 0); */
		fdp->fd_files[fd].fp = fp;
		fdp->fd_files[fd].reserved = 0;
	} else {
		fdp->fd_files[fd].reserved = 0;
		fdreserve_locked(fdp, fd, -1);
		fdfixup_locked(fdp, fd);
	}
}
/*
 * Exclusive-lock wrapper around fsetfd_locked().  Acquires fdp->fd_spin
 * itself; the caller must NOT already hold it.
 */
void
fsetfd(struct filedesc *fdp, struct file *fp, int fd)
{
	spin_lock(&fdp->fd_spin);
	fsetfd_locked(fdp, fp, fd);
	spin_unlock(&fdp->fd_spin);
}
/*
 * Caller must hold an exclusive spinlock on fdp->fd_spin.
 */
static
struct file *
funsetfd_locked(struct filedesc *fdp, int fd)
{
	struct file *fp;

	if ((unsigned)fd >= fdp->fd_nfiles)
		return (NULL);
	if ((fp = fdp->fd_files[fd].fp) == NULL)
		return (NULL);
	++fdp->fd_closedcounter;
	fclearcache(&fdp->fd_files[fd], NULL, 0);
	fdp->fd_files[fd].fp = NULL;
	fdp->fd_files[fd].fileflags = 0;
	++fdp->fd_closedcounter;

	fdreserve_locked(fdp, fd, -1);
	fdfixup_locked(fdp, fd);

	return (fp);
}
/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
{
	int error;

	spin_lock_shared(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		*flagsp = fdp->fd_files[fd].fileflags;
		error = 0;
	}
	spin_unlock_shared(&fdp->fd_spin);

	return (error);
}
/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
{
	int error;

	spin_lock(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		fdp->fd_files[fd].fileflags |= add_flags;
		error = 0;
	}
	spin_unlock(&fdp->fd_spin);

	return (error);
}
/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
{
	int error;

	spin_lock(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		fdp->fd_files[fd].fileflags &= ~rem_flags;
		error = 0;
	}
	spin_unlock(&fdp->fd_spin);

	return (error);
}
/*
 * Set/Change/Clear the creds for a fp and synchronize the uidinfo.
 */
void
fsetcred(struct file *fp, struct ucred *ncr)
{
	struct ucred *ocr;
	struct uidinfo *uip;
	struct uidcount *pup;
	int cpu = mycpuid;
	int count;

	ocr = fp->f_cred;
	if (ocr == NULL || ncr == NULL || ocr->cr_uidinfo != ncr->cr_uidinfo) {
		if (ocr) {
			uip = ocr->cr_uidinfo;
			pup = &uip->ui_pcpu[cpu];
			atomic_add_int(&pup->pu_openfiles, -1);
			if (pup->pu_openfiles < -PUP_LIMIT ||
			    pup->pu_openfiles > PUP_LIMIT) {
				count = atomic_swap_int(&pup->pu_openfiles, 0);
				atomic_add_int(&uip->ui_openfiles, count);
			}
		}
		if (ncr) {
			uip = ncr->cr_uidinfo;
			pup = &uip->ui_pcpu[cpu];
			atomic_add_int(&pup->pu_openfiles, 1);
			if (pup->pu_openfiles < -PUP_LIMIT ||
			    pup->pu_openfiles > PUP_LIMIT) {
				count = atomic_swap_int(&pup->pu_openfiles, 0);
				atomic_add_int(&uip->ui_openfiles, count);
			}
		}
	}
	if (ncr)
		crhold(ncr);
	fp->f_cred = ncr;
	if (ocr)
		crfree(ocr);
}
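/*
 * Informal note on the per-cpu batching above (editor's sketch): each cpu
 * accumulates open-file deltas in its own ui_pcpu[cpu].pu_openfiles and
 * only folds them into the shared ui_openfiles once the local count
 * drifts past +/-PUP_LIMIT.  If PUP_LIMIT were, say, 16 (hypothetical
 * value for illustration), a cpu could open and close 16 files with no
 * shared-cacheline traffic at all; fdalloc_locked() force-flushes all
 * cpus when the global total appears to exceed the user limit.
 */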
/*
 * Free a file descriptor.
 */
static
void
ffree(struct file *fp)
{
	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
	fsetcred(fp, NULL);
	if (fp->f_nchandle.ncp)
	    cache_drop(&fp->f_nchandle);
	kfree_obj(fp, M_FILE);
}
/*
 * called from init_main, initialize filedesc0 for proc0.
 */
void
fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask)
{
	p0->p_fd = fdp0;
	p0->p_fdtol = NULL;
	fdp0->fd_refcnt = 1;
	fdp0->fd_cmask = cmask;
	fdp0->fd_files = fdp0->fd_builtin_files;
	fdp0->fd_nfiles = NDFILE;
	fdp0->fd_lastfile = -1;
	spin_init(&fdp0->fd_spin, "fdinitbootstrap");
}
/*
 * Build a new filedesc structure.
 */
struct filedesc *
fdinit(struct proc *p)
{
	struct filedesc *newfdp;
	struct filedesc *fdp = p->p_fd;
	int cmask;

	newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO);
	spin_lock(&fdp->fd_spin);
	cmask = fdp->fd_cmask;
	if (fdp->fd_cdir) {
		newfdp->fd_cdir = fdp->fd_cdir;
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
	}

	/*
	 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of
	 * proc0, but should unconditionally exist in other processes.
	 */
	if (fdp->fd_rdir) {
		newfdp->fd_rdir = fdp->fd_rdir;
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	}
	if (fdp->fd_jdir) {
		newfdp->fd_jdir = fdp->fd_jdir;
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	}
	spin_unlock(&fdp->fd_spin);

	/* Create the file descriptor table. */
	newfdp->fd_refcnt = 1;
	newfdp->fd_cmask = cmask;
	newfdp->fd_files = newfdp->fd_builtin_files;
	newfdp->fd_nfiles = NDFILE;
	newfdp->fd_lastfile = -1;
	spin_init(&newfdp->fd_spin, "fdinit");

	return (newfdp);
}
/*
 * Share a filedesc structure.
 */
struct filedesc *
fdshare(struct proc *p)
{
	struct filedesc *fdp;

	fdp = p->p_fd;
	spin_lock(&fdp->fd_spin);
	fdp->fd_refcnt++;
	spin_unlock(&fdp->fd_spin);
	return (fdp);
}
/*
 * Copy a filedesc structure.
 */
int
fdcopy(struct proc *p, struct filedesc **fpp)
{
	struct filedesc *fdp = p->p_fd;
	struct filedesc *newfdp;
	struct fdnode *fdnode;
	int i;
	int ni;

	/*
	 * Certain daemons might not have file descriptors.
	 */
	if (fdp == NULL)
		return (0);

	/*
	 * Allocate the new filedesc and fd_files[] array.  This can race
	 * with operations by other threads on the fdp so we have to be
	 * careful.
	 */
	newfdp = kmalloc(sizeof(struct filedesc),
			 M_FILEDESC, M_WAITOK | M_ZERO | M_NULLOK);
	if (newfdp == NULL) {
		*fpp = NULL;
		return (ENOMEM);
	}
again:
	spin_lock(&fdp->fd_spin);
	if (fdp->fd_lastfile < NDFILE) {
		newfdp->fd_files = newfdp->fd_builtin_files;
		i = NDFILE;
	} else {
		/*
		 * We have to allocate (2^N-1) entries for our in-place
		 * binary tree.  Allow the table to shrink.
		 */
		i = fdp->fd_nfiles;
		ni = (i - 1) / 2;
		while (ni > fdp->fd_lastfile && ni > NDFILE) {
			i = ni;
			ni = (i - 1) / 2;
		}
		spin_unlock(&fdp->fd_spin);
		newfdp->fd_files = kmalloc(i * sizeof(struct fdnode),
					   M_FILEDESC, M_WAITOK | M_ZERO);

		/*
		 * Check for race, retry
		 */
		spin_lock(&fdp->fd_spin);
		if (i <= fdp->fd_lastfile) {
			spin_unlock(&fdp->fd_spin);
			kfree(newfdp->fd_files, M_FILEDESC);
			goto again;
		}
	}

	/*
	 * Dup the remaining fields.  vref() and cache_hold() can be
	 * safely called while holding the read spinlock on fdp.
	 *
	 * The read spinlock on fdp is still being held.
	 *
	 * NOTE: vref and cache_hold calls for the case where the vnode
	 * or cache entry already has at least one ref may be called
	 * while holding spin locks.
	 */
	if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) {
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
	}
	/*
	 * We must check for fd_rdir here, at least for now because
	 * the init process is created before we have access to the
	 * rootvnode to take a reference to it.
	 */
	if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) {
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	}
	if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) {
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	}
	newfdp->fd_refcnt = 1;
	newfdp->fd_nfiles = i;
	newfdp->fd_lastfile = fdp->fd_lastfile;
	newfdp->fd_freefile = fdp->fd_freefile;
	newfdp->fd_cmask = fdp->fd_cmask;
	spin_init(&newfdp->fd_spin, "fdcopy");

	/*
	 * Copy the descriptor table through (i).  This also copies the
	 * allocation state.  Then go through and ref the file pointers
	 * and clean up any KQ descriptors.
	 *
	 * kq descriptors cannot be copied.  Since we haven't ref'd the
	 * copied files yet we can ignore the return value from funsetfd().
	 *
	 * The read spinlock on fdp is still being held.
	 *
	 * Be sure to clean out fdnode->tdcache, otherwise bad things will
	 * happen.
	 */
	bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode));
	for (i = 0 ; i < newfdp->fd_nfiles; ++i) {
		fdnode = &newfdp->fd_files[i];
		if (fdnode->reserved) {
			fdreserve_locked(newfdp, i, -1);
			fdnode->reserved = 0;
			fdfixup_locked(newfdp, i);
		} else if (fdnode->fp) {
			bzero(&fdnode->tdcache, sizeof(fdnode->tdcache));
			if (fdnode->fp->f_type == DTYPE_KQUEUE) {
				(void)funsetfd_locked(newfdp, i);
			} else {
				fhold(fdnode->fp);
			}
		}
	}
	spin_unlock(&fdp->fd_spin);
	*fpp = newfdp;
	return (0);
}
2516 * Release a filedesc structure.
2518 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE)
2521 fdfree(struct proc
*p
, struct filedesc
*repl
)
2523 struct filedesc
*fdp
;
2524 struct fdnode
*fdnode
;
2526 struct filedesc_to_leader
*fdtol
;
2532 * Before destroying or replacing p->p_fd we must be sure to
2533 * clean out the cache of the last thread, which should be
2536 fexitcache(curthread
);
2539 * Certain daemons might not have file descriptors.
2548 * Severe messing around to follow.
2550 spin_lock(&fdp
->fd_spin
);
2552 /* Check for special need to clear POSIX style locks */
2554 if (fdtol
!= NULL
) {
2555 KASSERT(fdtol
->fdl_refcount
> 0,
2556 ("filedesc_to_refcount botch: fdl_refcount=%d",
2557 fdtol
->fdl_refcount
));
2558 if (fdtol
->fdl_refcount
== 1 && p
->p_leader
->p_advlock_flag
) {
2559 for (i
= 0; i
<= fdp
->fd_lastfile
; ++i
) {
2560 fdnode
= &fdp
->fd_files
[i
];
2561 if (fdnode
->fp
== NULL
||
2562 fdnode
->fp
->f_type
!= DTYPE_VNODE
) {
2567 spin_unlock(&fdp
->fd_spin
);
2569 lf
.l_whence
= SEEK_SET
;
2572 lf
.l_type
= F_UNLCK
;
2573 vp
= (struct vnode
*)fp
->f_data
;
2574 VOP_ADVLOCK(vp
, (caddr_t
)p
->p_leader
,
2575 F_UNLCK
, &lf
, F_POSIX
);
2577 spin_lock(&fdp
->fd_spin
);
2581 if (fdtol
->fdl_refcount
== 1) {
2582 if (fdp
->fd_holdleaderscount
> 0 &&
2583 p
->p_leader
->p_advlock_flag
) {
2585 * close() or do_dup() has cleared a reference
2586 * in a shared file descriptor table.
2588 fdp
->fd_holdleaderswakeup
= 1;
2589 ssleep(&fdp
->fd_holdleaderscount
,
2590 &fdp
->fd_spin
, 0, "fdlhold", 0);
2593 if (fdtol
->fdl_holdcount
> 0) {
2595 * Ensure that fdtol->fdl_leader
2596 * remains valid in closef().
2598 fdtol
->fdl_wakeup
= 1;
2599 ssleep(fdtol
, &fdp
->fd_spin
, 0, "fdlhold", 0);
2603 fdtol
->fdl_refcount
--;
2604 if (fdtol
->fdl_refcount
== 0 &&
2605 fdtol
->fdl_holdcount
== 0) {
2606 fdtol
->fdl_next
->fdl_prev
= fdtol
->fdl_prev
;
2607 fdtol
->fdl_prev
->fdl_next
= fdtol
->fdl_next
;
2612 if (fdtol
!= NULL
) {
2613 spin_unlock(&fdp
->fd_spin
);
2614 kfree(fdtol
, M_FILEDESC_TO_LEADER
);
2615 spin_lock(&fdp
->fd_spin
);
2618 if (--fdp
->fd_refcnt
> 0) {
2619 spin_unlock(&fdp
->fd_spin
);
2620 spin_lock(&p
->p_spin
);
2622 spin_unlock(&p
->p_spin
);
2627 * Even though we are the last reference to the structure allproc
2628 * scans may still reference the structure. Maintain proper
2629 * locks until we can replace p->p_fd.
2631 * Also note that kqueue's closef still needs to reference the
2632 * fdp via p->p_fd, so we have to close the descriptors before
2633 * we replace p->p_fd.
2635 for (i
= 0; i
<= fdp
->fd_lastfile
; ++i
) {
2636 if (fdp
->fd_files
[i
].fp
) {
2637 fp
= funsetfd_locked(fdp
, i
);
2639 spin_unlock(&fdp
->fd_spin
);
2640 if (SLIST_FIRST(&fp
->f_klist
))
2641 knote_fdclose(fp
, fdp
, i
);
2643 spin_lock(&fdp
->fd_spin
);
2647 spin_unlock(&fdp
->fd_spin
);
	/*
	 * Interlock against allproc scan operations (typically frevoke).
	 */
	spin_lock(&p->p_spin);
	p->p_fd = repl;
	spin_unlock(&p->p_spin);

	/*
	 * Wait for any softrefs to go away.  This race rarely occurs so
	 * we can use a non-critical-path style poll/sleep loop.  The
	 * race only occurs against allproc scans.
	 *
	 * No new softrefs can occur with the fdp disconnected from the
	 * process.
	 */
	if (fdp->fd_softrefs) {
		kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid);
		while (fdp->fd_softrefs)
			tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1);
	}
	if (fdp->fd_files != fdp->fd_builtin_files)
		kfree(fdp->fd_files, M_FILEDESC);
	if (fdp->fd_cdir) {
		cache_drop(&fdp->fd_ncdir);
		vrele(fdp->fd_cdir);
	}
	if (fdp->fd_rdir) {
		cache_drop(&fdp->fd_nrdir);
		vrele(fdp->fd_rdir);
	}
	if (fdp->fd_jdir) {
		cache_drop(&fdp->fd_njdir);
		vrele(fdp->fd_jdir);
	}
	kfree(fdp, M_FILEDESC);
}
/*
 * Retrieve and reference the file pointer associated with a descriptor.
 *
 * td must be the current thread.
 */
struct file *
holdfp(thread_t td, int fd, int flag)
{
	struct file *fp;

	fp = _holdfp_cache(td, fd);
	if (fp) {
		if ((fp->f_flag & flag) == 0 && flag != -1) {
			fdrop(fp);
			fp = NULL;
		}
	}
	return fp;
}
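
/*
 * Example (sketch, not part of the original source): the usual pattern
 * for kernel code that needs a descriptor's file pointer is to hold it
 * for the duration of the operation and drop it afterwards.  "my_fd"
 * and the FREAD requirement are illustrative only:
 *
 *	struct file *fp;
 *
 *	if ((fp = holdfp(curthread, my_fd, FREAD)) == NULL)
 *		return (EBADF);
 *	... operate on fp; the hold keeps it from being ripped out ...
 *	fdrop(fp);
 */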
/*
 * holdsock() - load the struct file pointer associated
 * with a socket into *fpp.  If an error occurs, non-zero
 * will be returned and *fpp will be set to NULL.
 *
 * td must be the current thread.
 */
int
holdsock(thread_t td, int fd, struct file **fpp)
{
	struct file *fp;
	int error;

	fp = _holdfp_cache(td, fd);
	if (fp) {
		if (fp->f_type != DTYPE_SOCKET) {
			fdrop(fp);
			fp = NULL;
			error = ENOTSOCK;
		} else {
			error = 0;
		}
	} else {
		error = EBADF;
	}
	*fpp = fp;

	return (error);
}
/*
 * Convert a user file descriptor to a held file pointer.
 *
 * td must be the current thread.
 */
int
holdvnode(thread_t td, int fd, struct file **fpp)
{
	struct file *fp;
	int error;

	fp = _holdfp_cache(td, fd);
	if (fp) {
		if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
			fdrop(fp);
			fp = NULL;
			error = EINVAL;
		} else {
			error = 0;
		}
	} else {
		error = EBADF;
	}
	*fpp = fp;

	return (error);
}
/*
 * Convert a user file descriptor to a held file pointer.
 *
 * td must be the current thread.
 */
int
holdvnode2(thread_t td, int fd, struct file **fpp, char *fflagsp)
{
	struct file *fp;
	int error;

	fp = _holdfp2(td, fd, fflagsp);
	if (fp) {
		if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
			fdrop(fp);
			fp = NULL;
			error = EINVAL;
		} else {
			error = 0;
		}
	} else {
		error = EBADF;
	}
	*fpp = fp;

	return (error);
}
/*
 * For setugid programs, we don't want people to use that setugidness
 * to generate error messages which write to a file that would otherwise
 * be off-limits to the process.
 *
 * This is a gross hack to plug the hole.  A better solution would involve
 * a special vop or other form of generalized access control mechanism.  We
 * go ahead and just reject all procfs file system accesses as dangerous.
 *
 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
 * sufficient.  We also don't check for setugidness since we know we are.
 */
static int
is_unsafe(struct file *fp)
{
	if (fp->f_type == DTYPE_VNODE &&
	    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
		return (1);
	return (0);
}
/*
 * Make this setugid thing safe, if at all possible.
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
void
setugidsafety(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * note: fdp->fd_files may be reallocated out from under us while
	 * we are blocked in a close.  Be careful!
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (i > 2)
			break;
		if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) {
			struct file *fp;

			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			if ((fp = funsetfd_locked(fdp, i)) != NULL) {
				knote_fdclose(fp, fdp, i);
				closef(fp, p);
			}
		}
	}
}
/*
 * Close all CLOEXEC files on exec.
 *
 * Only a single thread remains for the current process.
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
void
fdcloseexec(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * We cannot cache fd_files since operations may block and rip
	 * them out from under us.
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (fdp->fd_files[i].fp != NULL &&
		    (fdp->fd_files[i].fileflags & UF_EXCLOSE)) {
			struct file *fp;

			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 *
			 * (funsetfd*() also clears the fd cache)
			 */
			if ((fp = funsetfd_locked(fdp, i)) != NULL) {
				knote_fdclose(fp, fdp, i);
				closef(fp, p);
			}
		}
	}
}
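
/*
 * Illustration (sketch, not part of the original source): fdcloseexec()
 * implements the close-on-exec semantics that userland requests via
 * fcntl(2).  A descriptor marked this way is closed here during exec.
 * The path below is illustrative only:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/tmp/scratch", O_RDWR);
 *	fcntl(fd, F_SETFD, FD_CLOEXEC);	// sets UF_EXCLOSE in the kernel
 *	execve(path, argv, envp);	// fd is closed by fdcloseexec()
 */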
/*
 * It is unsafe for set[ug]id processes to be started with file
 * descriptors 0..2 closed, as these descriptors are given implicit
 * significance in the Standard C library.  fdcheckstd() will create a
 * descriptor referencing /dev/null for each of stdin, stdout, and
 * stderr that is not already open.
 *
 * NOT MPSAFE - calls falloc, vn_open, etc
 */
int
fdcheckstd(struct lwp *lp)
{
	struct nlookupdata nd;
	struct filedesc *fdp;
	struct file *fp;
	int retval;
	int i, error, flags, devnull;

	fdp = lp->lwp_proc->p_fd;
	if (fdp == NULL)
		return (0);
	devnull = -1;
	error = 0;
	for (i = 0; i < 3; i++) {
		if (fdp->fd_files[i].fp != NULL)
			continue;
		if (devnull < 0) {
			if ((error = falloc(lp, &fp, &devnull)) != 0)
				break;

			error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
					     NLC_FOLLOW|NLC_LOCKVP);
			flags = FREAD | FWRITE;
			if (error == 0)
				error = vn_open(&nd, &fp, flags, 0);
			if (error == 0)
				fsetfd(fdp, fp, devnull);
			else
				fsetfd(fdp, NULL, devnull);
			fdrop(fp);
			nlookup_done(&nd);
			if (error)
				break;
			KKASSERT(i == devnull);
		} else {
			error = kern_dup(DUP_FIXED, devnull, i, &retval);
			if (error != 0)
				break;
		}
	}
	return (error);
}
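
/*
 * Illustration (sketch, not part of the original source): without this
 * check, a setuid program started with stdin closed, e.g.
 *
 *	$ ./suidprog 0<&-
 *
 * could open() a sensitive file, receive descriptor 0, and then have
 * library code write diagnostic output into it.  Plugging fds 0..2
 * with /dev/null closes that hole.
 */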
/*
 * Internal form of close.
 * Decrement reference count on file structure.
 * Note: td and/or p may be NULL when closing a file
 * that was being passed in a message.
 *
 * MPALMOSTSAFE - acquires mplock for VOP operations
 */
int
closef(struct file *fp, struct proc *p)
{
	struct vnode *vp;
	struct flock lf;
	struct filedesc_to_leader *fdtol;

	if (fp == NULL)
		return (0);

	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor.
	 */
	if (p != NULL && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
	) {
		if (p->p_leader->p_advlock_flag) {
			lf.l_whence = SEEK_SET;
			lf.l_start = 0;
			lf.l_len = 0;
			lf.l_type = F_UNLCK;
			vp = (struct vnode *)fp->f_data;
			VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
				    &lf, F_POSIX);
		}
		fdtol = p->p_fdtol;
		if (fdtol != NULL) {
			lwkt_gettoken(&p->p_token);

			/*
			 * Handle special case where file descriptor table
			 * is shared between multiple process leaders.
			 */
			for (fdtol = fdtol->fdl_next;
			     fdtol != p->p_fdtol;
			     fdtol = fdtol->fdl_next) {
				if (fdtol->fdl_leader->p_advlock_flag == 0)
					continue;
				fdtol->fdl_holdcount++;
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = (struct vnode *)fp->f_data;
				VOP_ADVLOCK(vp, (caddr_t)fdtol->fdl_leader,
					    F_UNLCK, &lf, F_POSIX);
				fdtol->fdl_holdcount--;
				if (fdtol->fdl_holdcount == 0 &&
				    fdtol->fdl_wakeup != 0) {
					fdtol->fdl_wakeup = 0;
					wakeup(fdtol);
				}
			}
			lwkt_reltoken(&p->p_token);
		}
	}
	return (fdrop(fp));
}
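
/*
 * Illustration (sketch, not part of the original source) of the POSIX
 * record-locking rule handled above: closing ANY descriptor for a file
 * releases all of the process's POSIX locks on that file, even locks
 * taken through a different descriptor.  "data" is illustrative:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd1 = open("data", O_RDWR);
 *	int fd2 = open("data", O_RDONLY);
 *	struct flock lf = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
 *	fcntl(fd1, F_SETLK, &lf);	// lock whole file via fd1
 *	close(fd2);			// releases the lock taken via fd1
 */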
/*
 * fhold() can only be called if f_count is already at least 1 (i.e. the
 * caller of fhold() already has a reference to the file pointer in some
 * manner or other).
 *
 * Atomic ops are used for incrementing and decrementing f_count before
 * the 1->0 transition.  f_count 1->0 transition is special, see the
 * comment in fdrop().
 */
void
fhold(struct file *fp)
{
	/* 0->1 transition will never work */
	KASSERT(fp->f_count > 0, ("fhold: invalid f_count %d", fp->f_count));
	atomic_add_int(&fp->f_count, 1);
}
/*
 * fdrop() - drop a reference to a descriptor
 */
int
fdrop(struct file *fp)
{
	struct flock lf;
	struct vnode *vp;
	int error, do_free = 0;

	/*
	 * NOTE:
	 * Simple atomic_fetchadd_int(f_count, -1) here will cause use-
	 * after-free or double free (due to f_count 0->1 transition), if
	 * fhold() is called on the fps found through filehead iteration.
	 */
	for (;;) {
		int count = fp->f_count;

		cpu_ccfence();
		KASSERT(count > 0, ("fdrop: invalid f_count %d", count));
		if (count == 1) {
			struct filelist_head *head = fp2filelist(fp);

			/*
			 * About to drop the last reference, hold the
			 * filehead spin lock and drop it, so that no
			 * one could see this fp through filehead anymore,
			 * let alone fhold() this fp.
			 */
			spin_lock(&head->spin);
			if (atomic_cmpset_int(&fp->f_count, count, 0)) {
				LIST_REMOVE(fp, f_list);
				spin_unlock(&head->spin);
				atomic_subtract_int(&nfiles, 1);
				do_free = 1; /* free this fp */
				break;
			}
			spin_unlock(&head->spin);
			/* retry */
		} else if (atomic_cmpset_int(&fp->f_count, count, count - 1)) {
			break;
		}
		/* retry */
	}
	if (!do_free)
		return (0);

	KKASSERT(SLIST_FIRST(&fp->f_klist) == NULL);

	/*
	 * The last reference has gone away, we own the fp structure free
	 * and clear.
	 */
	if (fp->f_count < 0)
		panic("fdrop: count < 0");
	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
	) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		vp = (struct vnode *)fp->f_data;
		VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
	}
	if (fp->f_ops != &badfileops)
		error = fo_close(fp);
	else
		error = 0;
	ffree(fp);
	return (error);
}
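
/*
 * Sketch (not part of the original source): the loop above is a
 * standard compare-and-swap retry pattern.  atomic_cmpset_int(p, old,
 * new) atomically performs the equivalent of the following and returns
 * non-zero on success:
 *
 *	if (*p == old) {
 *		*p = new;
 *		return (1);
 *	}
 *	return (0);
 *
 * Re-reading f_count and retrying on failure ensures the 1->0
 * transition happens exactly once, under the filelist head spinlock.
 */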
/*
 * Apply an advisory lock on a file descriptor.
 *
 * Just attempt to get a record lock of the requested type on
 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
 */
int
sys_flock(struct sysmsg *sysmsg, const struct flock_args *uap)
{
	thread_t td = curthread;
	struct file *fp;
	struct vnode *vp;
	struct flock lf;
	int error;

	if ((fp = holdfp(td, uap->fd, -1)) == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_VNODE) {
		error = EOPNOTSUPP;
		goto done;
	}
	vp = (struct vnode *)fp->f_data;
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	if (uap->how & LOCK_UN) {
		lf.l_type = F_UNLCK;
		atomic_clear_int(&fp->f_flag, FHASLOCK); /* race ok */
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
		goto done;
	}
	if (uap->how & LOCK_EX)
		lf.l_type = F_WRLCK;
	else if (uap->how & LOCK_SH)
		lf.l_type = F_RDLCK;
	else {
		error = EBADF;
		goto done;
	}
	if (uap->how & LOCK_NB)
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0);
	else
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT);
	atomic_set_int(&fp->f_flag, FHASLOCK);	/* race ok */
done:
	fdrop(fp);
	return (error);
}
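
/*
 * Example (sketch, not part of the original source): userland view of
 * the syscall above.  The lock applies to the whole file and is
 * advisory; "spool.lock" is illustrative only:
 *
 *	#include <sys/file.h>
 *	#include <fcntl.h>
 *
 *	int fd = open("spool.lock", O_RDWR);
 *	if (flock(fd, LOCK_EX | LOCK_NB) < 0) {
 *		// EWOULDBLOCK: someone else holds the lock
 *	}
 *	...
 *	flock(fd, LOCK_UN);
 */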
/*
 * File Descriptor pseudo-device driver ( /dev/fd/N ).
 *
 * This interface is now a bit more linux-compatible and attempts to not
 * share seek positions by not sharing the fp of the descriptor when
 * possible.
 *
 * Probably a good idea anyhow, but now particularly important for
 * fexecve() which uses /dev/fd/N.
 *
 * The original interface effectively dup()d the descriptor.
 */
static int
fdopen(struct dev_open_args *ap)
{
	thread_t td = curthread;
	struct file *wfp;
	struct file *fp;
	struct vnode *vp;
	int error;
	int sfd;

	KKASSERT(td->td_lwp != NULL);

	/*
	 * Get the fp for /dev/fd/N
	 */
	sfd = minor(ap->a_head.a_dev);
	if ((wfp = holdfp(td, sfd, -1)) == NULL)
		return (EBADF);

	/*
	 * Close a revoke/dup race.  Duping a descriptor marked as revoked
	 * will dup a dummy descriptor instead of the real one.
	 */
	if (wfp->f_flag & FREVOKED) {
		kprintf("Warning: attempt to dup() a revoked descriptor\n");
		fdrop(wfp);
		wfp = NULL;
		error = falloc(NULL, &wfp, NULL);
		if (error)
			return (error);
	}

	/*
	 * Check that the mode the file is being opened for is a
	 * subset of the mode of the existing descriptor.
	 */
	if (ap->a_fpp == NULL) {
		fdrop(wfp);
		return (EINVAL);
	}
	if (((ap->a_oflags & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
		fdrop(wfp);
		return (EACCES);
	}
	if (wfp->f_type == DTYPE_VNODE && wfp->f_data) {
		/*
		 * If wfp is a vnode create a new fp so things like the
		 * seek position (etc) are not shared with the original.
		 *
		 * Don't try to call VOP_OPEN().  Adjust the open-count
		 * ourselves.
		 */
		vp = wfp->f_data;
		fp = *ap->a_fpp;

		/*
		 * Yah... this wouldn't be good.
		 */
		if ((ap->a_oflags & (FWRITE|O_TRUNC)) && vp->v_type == VDIR) {
			fdrop(wfp);
			return (EISDIR);
		}

		/*
		 * Setup the new fp and simulate an open(), but for now do
		 * not actually call VOP_OPEN() though we probably could.
		 */
		fp->f_type = DTYPE_VNODE;
		/* retain flags not to be copied */
		fp->f_flag = (fp->f_flag & ~FMASK) | (ap->a_oflags & FMASK);
		fp->f_ops = &vnode_fileops;
		fp->f_data = vp;
		vref(vp);

		if (ap->a_oflags & FWRITE)
			atomic_add_int(&vp->v_writecount, 1);
		KKASSERT(vp->v_opencount >= 0 && vp->v_opencount != INT_MAX);
		atomic_add_int(&vp->v_opencount, 1);
		fdrop(wfp);
	} else {
		/*
		 * If wfp is not a vnode we have to share it directly.
		 */
		*ap->a_fpp = wfp;	/* transfer hold count */
	}
	return (0);
}
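
/*
 * Illustration (sketch, not part of the original source): opening
 * /dev/fd/N behaves like re-opening descriptor N.  With the scheme
 * above, a vnode-backed descriptor gets its own fp, so the new
 * descriptor does not share a seek position with the original.  The
 * path is illustrative only:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/var/log/messages", O_RDONLY);
 *	char path[32];
 *	snprintf(path, sizeof(path), "/dev/fd/%d", fd);
 *	int fd2 = open(path, O_RDONLY);
 *	lseek(fd, 100, SEEK_SET);	// does not move fd2's offset
 */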
/*
 * NOT MPSAFE - I think these refer to a common file descriptor table
 * and we need to spinlock that to link fdtol in.
 */
struct filedesc_to_leader *
filedesc_to_leader_alloc(struct filedesc_to_leader *old,
			 struct proc *leader)
{
	struct filedesc_to_leader *fdtol;

	fdtol = kmalloc(sizeof(struct filedesc_to_leader),
			M_FILEDESC_TO_LEADER, M_WAITOK | M_ZERO);
	fdtol->fdl_refcount = 1;
	fdtol->fdl_holdcount = 0;
	fdtol->fdl_wakeup = 0;
	fdtol->fdl_leader = leader;
	if (old != NULL) {
		fdtol->fdl_next = old->fdl_next;
		fdtol->fdl_prev = old;
		old->fdl_next = fdtol;
		fdtol->fdl_next->fdl_prev = fdtol;
	} else {
		fdtol->fdl_next = fdtol;
		fdtol->fdl_prev = fdtol;
	}
	return (fdtol);
}
/*
 * Scan all file pointers in the system.  The callback is made with
 * the master list spinlock held exclusively.
 */
void
allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
{
	int i;

	for (i = 0; i < NFILELIST_HEADS; ++i) {
		struct filelist_head *head = &filelist_heads[i];
		struct file *fp;

		spin_lock(&head->spin);
		LIST_FOREACH(fp, &head->list, f_list) {
			int res;

			res = callback(fp, data);
			if (res < 0)
				break;
		}
		spin_unlock(&head->spin);
	}
}
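
/*
 * Example (sketch, not part of the original source): a minimal
 * callback.  Returning a negative value stops the scan of the current
 * list head; "count_sockets" and its counter argument are illustrative:
 *
 *	static int
 *	count_sockets(struct file *fp, void *data)
 *	{
 *		if (fp->f_type == DTYPE_SOCKET)
 *			++*(int *)data;
 *		return (0);
 *	}
 *
 *	int n = 0;
 *	allfiles_scan_exclusive(count_sockets, &n);
 */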
/*
 * Get file structures.
 *
 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe)
 */
struct sysctl_kern_file_info {
	int count;
	int error;
	struct sysctl_req *req;
};

static int sysctl_kern_file_callback(struct proc *p, void *data);

static int
sysctl_kern_file(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_kern_file_info info;

	/*
	 * Note: because the number of file descriptors is calculated
	 * in different ways for sizing vs returning the data,
	 * there is information leakage from the first loop.  However,
	 * it is of a similar order of magnitude to the leakage from
	 * global system statistics such as kern.openfiles.
	 *
	 * When just doing a count, note that we cannot just count
	 * the elements and add f_count via the filehead list because
	 * threaded processes share their descriptor table and f_count might
	 * still be '1' in that case.
	 *
	 * Since the SYSCTL op can block, we must hold the process to
	 * prevent it being ripped out from under us either in the
	 * file descriptor loop or in the greater LIST_FOREACH.  The
	 * process may be in varying states of disrepair.  If the process
	 * is in SZOMB we may have caught it just as it is being removed
	 * from the allproc list, we must skip it in that case to maintain
	 * an unbroken chain through the allproc list.
	 */
	info.count = 0;
	info.error = 0;
	info.req = req;
	allproc_scan(sysctl_kern_file_callback, &info, 0);

	/*
	 * When just calculating the size, overestimate a bit to try to
	 * prevent system activity from causing the buffer-fill call
	 * to fail later on.
	 */
	if (req->oldptr == NULL) {
		info.count = (info.count + 16) + (info.count / 10);
		info.error = SYSCTL_OUT(req, NULL,
					info.count * sizeof(struct kinfo_file));
	}
	return (info.error);
}
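
/*
 * Example (sketch, not part of the original source): userland retrieves
 * this table via sysctl(3), sizing the buffer with a NULL oldp first:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdlib.h>
 *
 *	int mib[2] = { CTL_KERN, KERN_FILE };
 *	size_t len;
 *
 *	sysctl(mib, 2, NULL, &len, NULL, 0);	// size probe
 *	void *buf = malloc(len);
 *	sysctl(mib, 2, buf, &len, NULL, 0);	// fetch kinfo_file array
 */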
static int
sysctl_kern_file_callback(struct proc *p, void *data)
{
	struct sysctl_kern_file_info *info = data;
	struct kinfo_file kf;
	struct filedesc *fdp;
	struct file *fp;
	uid_t uid;
	int n;

	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return (0);
	if (!(PRISON_CHECK(info->req->td->td_ucred, p->p_ucred) != 0))
		return (0);

	/*
	 * Softref the fdp to prevent it from being destroyed
	 */
	spin_lock(&p->p_spin);
	if ((fdp = p->p_fd) == NULL) {
		spin_unlock(&p->p_spin);
		return (0);
	}
	atomic_add_int(&fdp->fd_softrefs, 1);
	spin_unlock(&p->p_spin);

	/*
	 * The fdp's own spinlock prevents the contents from being
	 * modified.
	 */
	spin_lock_shared(&fdp->fd_spin);
	for (n = 0; n < fdp->fd_nfiles; ++n) {
		if ((fp = fdp->fd_files[n].fp) == NULL)
			continue;
		if (info->req->oldptr == NULL) {
			info->count++;
		} else {
			uid = p->p_ucred ? p->p_ucred->cr_uid : -1;
			kcore_make_file(&kf, fp, p->p_pid, uid, n);
			spin_unlock_shared(&fdp->fd_spin);
			info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf));
			spin_lock_shared(&fdp->fd_spin);
			if (info->error)
				break;
		}
	}
	spin_unlock_shared(&fdp->fd_spin);
	atomic_subtract_int(&fdp->fd_softrefs, 1);
	if (info->error)
		return (-1);
	return (0);
}
SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
	    0, 0, sysctl_kern_file, "S,file", "Entire file table");

SYSCTL_INT(_kern, OID_AUTO, minfilesperproc, CTLFLAG_RW,
	   &minfilesperproc, 0, "Minimum files allowed open per process");
SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
	   &maxfilesperproc, 0, "Maximum files allowed open per process");
SYSCTL_INT(_kern, OID_AUTO, maxfilesperuser, CTLFLAG_RW,
	   &maxfilesperuser, 0, "Maximum files allowed open per user");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
	   &maxfiles, 0, "Maximum number of files");

SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
	   &maxfilesrootres, 0, "Descriptors reserved for root use");

SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
	   &nfiles, 0, "System-wide number of open files");
static void
fildesc_drvinit(void *unused)
{
	int fd;

	for (fd = 0; fd < NUMFDESC; fd++) {
		make_dev(&fildesc_ops, fd,
			 UID_BIN, GID_BIN, 0666, "fd/%d", fd);
	}

	make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
	make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
	make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
}
struct fileops badfileops = {
	.fo_read = badfo_readwrite,
	.fo_write = badfo_readwrite,
	.fo_ioctl = badfo_ioctl,
	.fo_kqfilter = badfo_kqfilter,
	.fo_stat = badfo_stat,
	.fo_close = badfo_close,
	.fo_shutdown = badfo_shutdown
};
static int
badfo_readwrite(struct file *fp, struct uio *uio, struct ucred *cred,
		int flags)
{
	return (EBADF);
}

static int
badfo_ioctl(struct file *fp, u_long com, caddr_t data,
	    struct ucred *cred, struct sysmsg *msgv)
{
	return (EBADF);
}

/*
 * Must return an error to prevent registration, typically
 * due to a revoked descriptor (file_filtops assigned).
 */
static int
badfo_kqfilter(struct file *fp, struct knote *kn)
{
	return (EOPNOTSUPP);
}

static int
badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	return (EBADF);
}

static int
badfo_close(struct file *fp)
{
	return (EBADF);
}

static int
badfo_shutdown(struct file *fp, int how)
{
	return (EBADF);
}

int
nofo_shutdown(struct file *fp, int how)
{
	return (EOPNOTSUPP);
}
SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR,
	fildesc_drvinit, NULL);

static void
filelist_heads_init(void *arg __unused)
{
	int i;

	for (i = 0; i < NFILELIST_HEADS; ++i) {
		struct filelist_head *head = &filelist_heads[i];

		spin_init(&head->spin, "filehead_spin");
		LIST_INIT(&head->list);
	}
}

SYSINIT(filelistheads, SI_BOOT1_LOCK, SI_ORDER_ANY, filelist_heads_init, NULL);