/*
 * Copyright (c) 2005-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey Hsu and Matthew Dillon.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/proc.h>
#include <sys/nlookup.h>
#include <sys/stat.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/event.h>
#include <sys/kern_syscall.h>
#include <sys/kcore.h>
#include <sys/kinfo.h>
#include <sys/objcache.h>

#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/spinlock2.h>
static int fdalloc_locked(struct proc *p, struct filedesc *fdp,
			int want, int *result);
static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd);
static void fdreserve_locked(struct filedesc *fdp, int fd0, int incr);
static struct file *funsetfd_locked(struct filedesc *fdp, int fd);
static void ffree(struct file *fp);

static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
		     "file desc to leader structures");
MALLOC_DEFINE(M_FILE, "file", "Open file structure");
static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");

static struct krate krate_uidinfo = { .freq = 1 };

static d_open_t fdopen;

#define CDEV_MAJOR 22
static struct dev_ops fildesc_ops = {
	{ "FD", 0, 0 },
	.d_open =	fdopen,
};
/*
 * Descriptor management.
 */
#ifndef NFILELIST_HEADS
#define NFILELIST_HEADS		257	/* prime number */
#endif

struct filelist_head {
	struct spinlock		spin;
	struct filelist		list;
};

static struct filelist_head	filelist_heads[NFILELIST_HEADS];

static int nfiles;		/* actual number of open files */

struct lwkt_token revoke_token = LWKT_TOKEN_INITIALIZER(revoke_token);

static struct objcache		*file_objcache;

static struct objcache_malloc_args file_malloc_args = {
	.objsize	= sizeof(struct file),
	.mtype		= M_FILE
};
/*
 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared.
 *
 * must be called with fdp->fd_spin exclusively held
 */
static __inline
void
fdfixup_locked(struct filedesc *fdp, int fd)
{
	if (fd < fdp->fd_freefile) {
		fdp->fd_freefile = fd;
	}
	while (fdp->fd_lastfile >= 0 &&
	       fdp->fd_files[fdp->fd_lastfile].fp == NULL &&
	       fdp->fd_files[fdp->fd_lastfile].reserved == 0) {
		--fdp->fd_lastfile;
	}
}
/*
 * Clear the fd thread caches for this fdnode.
 *
 * If match_fdc is NULL, all thread caches of fdn will be cleared.
 * The caller must hold fdp->fd_spin exclusively.  The threads caching
 * the descriptor do not have to be the current thread.  The (status)
 * argument is ignored.
 *
 * If match_fdc is not NULL, only the match_fdc's cache will be cleared.
 * The caller must hold fdp->fd_spin shared and match_fdc must match a
 * fdcache entry in curthread.  match_fdc has been locked by the caller
 * and had the specified (status).
 *
 * Since we are matching against a fp in the fdp (which must still be present
 * at this time), fp will have at least two refs on any match and we can
 * decrement the count trivially.
 */
static
void
fclearcache(struct fdnode *fdn, struct fdcache *match_fdc, int status)
{
	struct fdcache *fdc;
	struct file *fp;
	int i;

	/*
	 * match_fdc == NULL	We are cleaning out all tdcache entries
	 *			for the fdn and hold fdp->fd_spin exclusively.
	 *			This can race against the target threads
	 *			cleaning out specific entries.
	 *
	 * match_fdc != NULL	We are cleaning out a specific tdcache
	 *			entry on behalf of the owning thread
	 *			and hold fdp->fd_spin shared.  The thread
	 *			has already locked the entry.  This cannot
	 *			race.
	 */
	fp = fdn->fp;
	for (i = 0; i < NTDCACHEFD; ++i) {
		if ((fdc = fdn->tdcache[i]) == NULL)
			continue;

		/*
		 * If match_fdc is non-NULL we are being asked to
		 * clear a specific fdc owned by curthread.  There must
		 * be exactly one match.  The caller has already locked
		 * the cache entry and will dispose of the lock after
		 * we return.
		 *
		 * Since we also have a shared lock on fdp, we
		 * can do this without atomic ops.
		 */
		if (match_fdc) {
			if (fdc != match_fdc)
				continue;
			fdn->tdcache[i] = NULL;
			KASSERT(fp == fdc->fp,
				("fclearcache(1): fp mismatch %p/%p\n",
				 fp, fdc->fp));
			fdc->fp = NULL;
			fdc->fd = -1;

			/*
			 * status can be 0 or 2.  If 2 the ref is borrowed,
			 * if 0 the ref is not borrowed and we have to drop
			 * it.
			 */
			if (status == 0)
				atomic_add_int(&fp->f_count, -1);
			fdn->isfull = 0;	/* heuristic */
			return;
		}

		/*
		 * Otherwise we hold an exclusive spin-lock and can only
		 * race thread consumers borrowing cache entries.
		 *
		 * Acquire the lock and dispose of the entry.  We have to
		 * spin until we get the lock.
		 */
		status = atomic_swap_int(&fdc->locked, 1);
		if (status == 1) {	/* foreign lock, retry */
			--i;
			continue;
		}
		fdn->tdcache[i] = NULL;
		KASSERT(fp == fdc->fp,
			("fclearcache(2): fp mismatch %p/%p\n",
			 fp, fdc->fp));
		fdc->fp = NULL;
		fdc->fd = -1;
		if (status == 0)
			atomic_add_int(&fp->f_count, -1);
		fdn->isfull = 0;	/* heuristic */
		atomic_swap_int(&fdc->locked, 0);
	}
	KKASSERT(match_fdc == NULL);
}
/*
 * Retrieve the fp for the specified fd given the specified file descriptor
 * table.  The fdp does not have to be owned by the current process.
 * If flags != -1, fp->f_flag must contain at least one of the flags.
 *
 * This function is not able to cache the fp.
 */
struct file *
holdfp_fdp(struct filedesc *fdp, int fd, int flag)
{
	struct file *fp;

	spin_lock_shared(&fdp->fd_spin);
	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			if ((fp->f_flag & flag) == 0 && flag != -1) {
				fp = NULL;
			} else {
				fhold(fp);
			}
		}
	} else {
		fp = NULL;
	}
	spin_unlock_shared(&fdp->fd_spin);

	return fp;
}
struct file *
holdfp_fdp_locked(struct filedesc *fdp, int fd, int flag)
{
	struct file *fp;

	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			if ((fp->f_flag & flag) == 0 && flag != -1) {
				fp = NULL;
			} else {
				fhold(fp);
			}
		}
	} else {
		fp = NULL;
	}
	return fp;
}
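/*
 * Per-thread fd cache locking summary (derived from the comments in
 * _holdfp_cache(), dropfp() and fclearcache() in this file; descriptive
 * only): fdc->locked is 0 when a cache entry is idle and holds no
 * borrowed reference, is set to 1 as a transient lock via
 * atomic_swap_int() while the entry is examined or cleared, and is 2
 * when the entry caches fdc->fp with a borrowed reference.
 */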
/*
 * Acquire the fp for the specified file descriptor, using the thread
 * cache if possible and caching it if possible.
 *
 * td must be the current thread.
 */
static
struct file *
_holdfp_cache(thread_t td, int fd)
{
	struct filedesc *fdp;
	struct fdcache *fdc;
	struct fdcache *best;
	struct fdnode *fdn;
	struct file *fp;
	int status;
	int delta;
	int i;

	/*
	 * Fast path: check the per-thread descriptor cache first.
	 */
	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
		if (fdc->fd != fd || fdc->fp == NULL)
			continue;
		status = atomic_swap_int(&fdc->locked, 1);

		/*
		 * If someone else has locked our cache entry they are in
		 * the middle of clearing it, skip the entry.
		 */
		if (status == 1)
			continue;

		/*
		 * We have locked the entry, but if it no longer matches
		 * restore the previous state (0 or 2) and skip the entry.
		 */
		if (fdc->fd != fd || fdc->fp == NULL) {
			atomic_swap_int(&fdc->locked, status);
			continue;
		}

		/*
		 * We have locked a valid entry.  We can borrow the ref
		 * for a mode 0 entry.  We can get a valid fp for a mode
		 * 2 entry but not borrow the ref.
		 */
		if (status == 0) {
			fp = fdc->fp;
			fdc->lru = ++td->td_fdcache_lru;
			atomic_swap_int(&fdc->locked, 2);
			return fp;
		}
		fp = fdc->fp;
		fhold(fp);
		fdc->lru = ++td->td_fdcache_lru;
		atomic_swap_int(&fdc->locked, 2);
		return fp;
	}

	/*
	 * Lookup the descriptor the slow way.  This can contend against
	 * modifying operations in a multi-threaded environment and cause
	 * cache line ping ponging otherwise.
	 */
	fdp = td->td_proc->p_fd;
	spin_lock_shared(&fdp->fd_spin);

	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			fhold(fp);
			if (fdp->fd_files[fd].isfull == 0)
				goto enter;
		}
	} else {
		fp = NULL;
	}
	spin_unlock_shared(&fdp->fd_spin);
	return fp;

enter:
	/*
	 * We found a valid fp and held it, fdp is still shared locked.
	 * Enter the fp into the per-thread cache.  Find the oldest entry
	 * via lru, or an empty entry.
	 *
	 * Because fdp's spinlock is held (shared is fine), no other
	 * thread should be in the middle of clearing our selected entry.
	 */
	best = &td->td_fdcache[0];
	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
		if (fdc->fp == NULL) {
			best = fdc;
			break;
		}
		delta = fdc->lru - best->lru;
		if (delta < 0)
			best = fdc;
	}

	/*
	 * Don't enter into the cache if we cannot get the lock.
	 */
	status = atomic_swap_int(&best->locked, 1);
	if (status == 1)
		goto done;

	/*
	 * Clear the previous cache entry if present
	 */
	if (best->fp) {
		KKASSERT(best->fd >= 0);
		fclearcache(&fdp->fd_files[best->fd], best, status);
	}

	/*
	 * Create our new cache entry.  This entry is 'safe' until we tie
	 * into the fdnode.  If we cannot tie in, we will clear the entry.
	 */
	best->fd = fd;
	best->fp = fp;
	best->lru = ++td->td_fdcache_lru;
	best->locked = 2;			/* borrowed ref */

	fdn = &fdp->fd_files[fd];
	for (i = 0; i < NTDCACHEFD; ++i) {
		if (fdn->tdcache[i] == NULL &&
		    atomic_cmpset_ptr((void **)&fdn->tdcache[i], NULL, best)) {
			goto done;
		}
	}
	fdn->isfull = 1;			/* no space */
	best->fd = -1;
	best->fp = NULL;
	best->locked = 0;
done:
	spin_unlock_shared(&fdp->fd_spin);
	return fp;
}
/*
 * Drop the file pointer and return to the thread cache if possible.
 *
 * Caller must not hold fdp's spin lock.
 * td must be the current thread.
 */
void
dropfp(thread_t td, int fd, struct file *fp)
{
	struct filedesc *fdp;
	struct fdcache *fdc;
	int status;

	fdp = td->td_proc->p_fd;

	/*
	 * If our placeholder is still present we can re-cache the ref.
	 *
	 * Note that we can race an fclearcache().
	 */
	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
		if (fdc->fp != fp || fdc->fd != fd)
			continue;
		status = atomic_swap_int(&fdc->locked, 1);
		switch(status) {
		case 0:
			/*
			 * Not in mode 2, fdrop fp without caching.
			 */
			atomic_swap_int(&fdc->locked, 0);
			break;
		case 1:
			/*
			 * Not in mode 2, locked by someone else.
			 * fdrop fp without caching.
			 */
			break;
		case 2:
			/*
			 * Intact borrowed ref, return to mode 0
			 * indicating that we have returned the ref.
			 *
			 * Return the borrowed ref (2->1->0)
			 */
			if (fdc->fp == fp && fdc->fd == fd) {
				atomic_swap_int(&fdc->locked, 0);
				return;
			}
			atomic_swap_int(&fdc->locked, 2);
			break;
		}
		break;
	}

	/*
	 * Failed to re-cache, drop the fp without caching.
	 */
	fdrop(fp);
}
/*
 * Clear all descriptors cached in the per-thread fd cache for
 * the specified thread.
 *
 * Caller must not hold p_fd->spin.  This function will temporarily
 * obtain a shared spin lock.
 */
void
fexitcache(thread_t td)
{
	struct filedesc *fdp;
	struct fdcache *fdc;
	int status;
	int i;

	if (td->td_proc == NULL)
		return;
	fdp = td->td_proc->p_fd;
	if (fdp == NULL)
		return;

	/*
	 * A shared lock is sufficient as the caller controls td and we
	 * are only clearing td's cache.
	 */
	spin_lock_shared(&fdp->fd_spin);
	for (i = 0; i < NFDCACHE; ++i) {
		fdc = &td->td_fdcache[i];
		if (fdc->fp == NULL)
			continue;
		status = atomic_swap_int(&fdc->locked, 1);
		if (status == 1)
			continue;
		if (fdc->fp) {
			KKASSERT(fdc->fd >= 0);
			fclearcache(&fdp->fd_files[fdc->fd], fdc,
				    status);
		}
		atomic_swap_int(&fdc->locked, 0);
	}
	spin_unlock_shared(&fdp->fd_spin);
}
static __inline
struct filelist_head *
fp2filelist(const struct file *fp)
{
	u_int i;

	i = (u_int)(uintptr_t)fp % NFILELIST_HEADS;
	return &filelist_heads[i];
}
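/*
 * Descriptive note, inferred from the code below: readplimits() returns
 * the process resource limits, caching a referenced plimit pointer in
 * td->td_limit so repeated lookups by the same thread can avoid taking
 * p->p_spin while the cached copy still matches p->p_limit.
 */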
static __inline
struct plimit *
readplimits(struct proc *p)
{
	thread_t td = curthread;
	struct plimit *limit;

	limit = td->td_limit;
	if (limit != p->p_limit) {
		spin_lock_shared(&p->p_spin);
		limit = p->p_limit;
		atomic_add_int(&limit->p_refcnt, 1);
		spin_unlock_shared(&p->p_spin);
		if (td->td_limit)
			plimit_free(td->td_limit);
		td->td_limit = limit;
	}
	return limit;
}
/*
 * System calls on descriptors.
 */
int
sys_getdtablesize(struct getdtablesize_args *uap)
{
	struct proc *p = curproc;
	struct plimit *limit = readplimits(p);
	int dtsize;

	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		dtsize = INT_MAX;
	else
		dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (dtsize > maxfilesperproc)
		dtsize = maxfilesperproc;
	if (dtsize < minfilesperproc)
		dtsize = minfilesperproc;
	if (p->p_ucred->cr_uid && dtsize > maxfilesperuser)
		dtsize = maxfilesperuser;
	uap->sysmsg_result = dtsize;

	return (0);
}
/*
 * Duplicate a file descriptor to a particular value.
 *
 * note: keep in mind that a potential race condition exists when closing
 * descriptors from a shared descriptor table (via rfork).
 */
int
sys_dup2(struct dup2_args *uap)
{
	int error;
	int fd = 0;

	error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd);
	uap->sysmsg_fds[0] = fd;

	return (error);
}

/*
 * Duplicate a file descriptor.
 */
int
sys_dup(struct dup_args *uap)
{
	int error;
	int fd = 0;

	error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd);
	uap->sysmsg_fds[0] = fd;

	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock for fp operations
 */
int
kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct vnode *vp;
	u_int nflags;
	u_int oflags;
	int newmin;
	int closedcounter;
	int tmp, error, flg = F_POSIX;

	KKASSERT(p);

	/*
	 * Operations on file descriptors that do not require a file pointer.
	 */
	switch (cmd) {
	case F_GETFD:
		error = fgetfdflags(p->p_fd, fd, &tmp);
		if (error == 0)
			dat->fc_cloexec = (tmp & UF_EXCLOSE) ? FD_CLOEXEC : 0;
		return (error);

	case F_SETFD:
		if (dat->fc_cloexec & FD_CLOEXEC)
			error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
		else
			error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
		return (error);

	case F_DUPFD:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_VARIABLE | DUP_FCNTL, fd, newmin,
				 &dat->fc_fd);
		return (error);

	case F_DUPFD_CLOEXEC:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_VARIABLE | DUP_CLOEXEC | DUP_FCNTL,
				 fd, newmin, &dat->fc_fd);
		return (error);

	case F_DUP2FD:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_FIXED, fd, newmin, &dat->fc_fd);
		return (error);

	case F_DUP2FD_CLOEXEC:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_FIXED | DUP_CLOEXEC, fd, newmin,
				 &dat->fc_fd);
		return (error);

	default:
		break;
	}

	/*
	 * Operations on file pointers
	 */
	closedcounter = p->p_fd->fd_closedcounter;
	if ((fp = holdfp(td, fd, -1)) == NULL)
		return (EBADF);

	switch (cmd) {
	case F_GETFL:
		dat->fc_flags = OFLAGS(fp->f_flag);
		error = 0;
		break;

	case F_SETFL:
		oflags = fp->f_flag;
		nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
		nflags |= oflags & ~FCNTLFLAGS;

		error = 0;
		if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY))
			error = EINVAL;
		if (error == 0 && ((nflags ^ oflags) & FASYNC)) {
			tmp = nflags & FASYNC;
			error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp,
					 cred, NULL);
		}

		/*
		 * If no error, must be atomically set.
		 */
		while (error == 0) {
			oflags = fp->f_flag;
			nflags = (oflags & ~FCNTLFLAGS) | (nflags & FCNTLFLAGS);
			if (atomic_cmpset_int(&fp->f_flag, oflags, nflags))
				break;
			cpu_pause();
		}
		break;

	case F_GETOWN:
		error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner,
				 cred, NULL);
		break;

	case F_SETOWN:
		error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner,
				 cred, NULL);
		break;

	case F_SETLKW:
		flg |= F_WAIT;
		/* Fall into F_SETLK */

	case F_SETLK:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;

		/*
		 * copyin/lockop may block
		 */
		if (dat->fc_flock.l_whence == SEEK_CUR)
			dat->fc_flock.l_start += fp->f_offset;

		switch (dat->fc_flock.l_type) {
		case F_RDLCK:
			if ((fp->f_flag & FREAD) == 0) {
				error = EBADF;
				break;
			}
			if (p->p_leader->p_advlock_flag == 0)
				p->p_leader->p_advlock_flag = 1;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
					    &dat->fc_flock, flg);
			break;
		case F_WRLCK:
			if ((fp->f_flag & FWRITE) == 0) {
				error = EBADF;
				break;
			}
			if (p->p_leader->p_advlock_flag == 0)
				p->p_leader->p_advlock_flag = 1;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
					    &dat->fc_flock, flg);
			break;
		case F_UNLCK:
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
					    &dat->fc_flock, F_POSIX);
			break;
		default:
			error = EINVAL;
			break;
		}

		/*
		 * It is possible to race a close() on the descriptor while
		 * we were blocked getting the lock.  If this occurs the
		 * close might not have caught the lock.
		 */
		if (checkfdclosed(td, p->p_fd, fd, fp, closedcounter)) {
			dat->fc_flock.l_whence = SEEK_SET;
			dat->fc_flock.l_start = 0;
			dat->fc_flock.l_len = 0;
			dat->fc_flock.l_type = F_UNLCK;
			VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
				    F_UNLCK, &dat->fc_flock, F_POSIX);
		}
		break;

	case F_GETLK:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;

		/*
		 * copyin/lockop may block
		 */
		if (dat->fc_flock.l_type != F_RDLCK &&
		    dat->fc_flock.l_type != F_WRLCK &&
		    dat->fc_flock.l_type != F_UNLCK) {
			error = EINVAL;
			break;
		}
		if (dat->fc_flock.l_whence == SEEK_CUR)
			dat->fc_flock.l_start += fp->f_offset;
		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
				    &dat->fc_flock, F_POSIX);
		break;

	default:
		error = EINVAL;
		break;
	}

	fdrop(fp);
	return (error);
}
/*
 * The file control system call.
 */
int
sys_fcntl(struct fcntl_args *uap)
{
	union fcntl_dat dat;
	int error;

	switch (uap->cmd) {
	case F_DUPFD:
	case F_DUP2FD:
	case F_DUPFD_CLOEXEC:
	case F_DUP2FD_CLOEXEC:
		dat.fc_fd = uap->arg;
		break;
	case F_SETFD:
		dat.fc_cloexec = uap->arg;
		break;
	case F_SETFL:
		dat.fc_flags = uap->arg;
		break;
	case F_SETOWN:
		dat.fc_owner = uap->arg;
		break;
	case F_SETLKW:
	case F_SETLK:
	case F_GETLK:
		error = copyin((caddr_t)uap->arg, &dat.fc_flock,
			       sizeof(struct flock));
		if (error)
			return (error);
		break;
	}

	error = kern_fcntl(uap->fd, uap->cmd, &dat, curthread->td_ucred);

	if (error == 0) {
		switch (uap->cmd) {
		case F_DUPFD:
		case F_DUP2FD:
		case F_DUPFD_CLOEXEC:
		case F_DUP2FD_CLOEXEC:
			uap->sysmsg_result = dat.fc_fd;
			break;
		case F_GETFD:
			uap->sysmsg_result = dat.fc_cloexec;
			break;
		case F_GETFL:
			uap->sysmsg_result = dat.fc_flags;
			break;
		case F_GETOWN:
			uap->sysmsg_result = dat.fc_owner;
			break;
		case F_GETLK:
			error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
					sizeof(struct flock));
			break;
		}
	}
	return (error);
}
/*
 * Common code for dup, dup2, and fcntl(F_DUPFD).
 *
 * There are four type flags: DUP_FCNTL, DUP_FIXED, DUP_VARIABLE, and
 * DUP_CLOEXEC.
 *
 * DUP_FCNTL is for handling EINVAL vs. EBADF differences between
 * fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC and dup2() (per POSIX).
 * The next two flags are mutually exclusive, and the fourth is optional.
 * DUP_FIXED tells kern_dup() to destructively dup over an existing file
 * descriptor if "new" is already open.  DUP_VARIABLE tells kern_dup()
 * to find the lowest unused file descriptor that is greater than or
 * equal to "new".  DUP_CLOEXEC, which works with either of the first
 * two flags, sets the close-on-exec flag on the "new" file descriptor.
 */
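/*
 * For example (see sys_dup2() and sys_dup() above), dup2(from, to) maps
 * to kern_dup(DUP_FIXED, from, to, &fd) and dup(fd) maps to
 * kern_dup(DUP_VARIABLE, fd, 0, &fd), while fcntl(fd, F_DUPFD, arg)
 * uses DUP_VARIABLE | DUP_FCNTL so an out-of-range target returns
 * EINVAL instead of EBADF.
 */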
int
kern_dup(int flags, int old, int new, int *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct plimit *limit = readplimits(p);
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct file *delfp;
	int oldflags;
	int holdleaders;
	int dtsize;
	int error, newfd;

	/*
	 * Verify that we have a valid descriptor to dup from and
	 * possibly to dup to.  When the new descriptor is out of
	 * bounds, fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC must
	 * return EINVAL, while dup2() returns EBADF in
	 * this case.
	 *
	 * NOTE: maxfilesperuser is not applicable to dup()
	 */
	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		dtsize = INT_MAX;
	else
		dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
	if (dtsize > maxfilesperproc)
		dtsize = maxfilesperproc;
	if (dtsize < minfilesperproc)
		dtsize = minfilesperproc;

	if (new < 0 || new > dtsize)
		return (flags & DUP_FCNTL ? EINVAL : EBADF);

retry:
	spin_lock(&fdp->fd_spin);
	if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
		spin_unlock(&fdp->fd_spin);
		return (EBADF);
	}
	if ((flags & DUP_FIXED) && old == new) {
		*res = new;
		if (flags & DUP_CLOEXEC)
			fdp->fd_files[new].fileflags |= UF_EXCLOSE;
		spin_unlock(&fdp->fd_spin);
		return (0);
	}
	fp = fdp->fd_files[old].fp;
	oldflags = fdp->fd_files[old].fileflags;
	fhold(fp);

	/*
	 * Allocate a new descriptor if DUP_VARIABLE, or expand the table
	 * if the requested descriptor is beyond the current table size.
	 *
	 * This can block.  Retry if the source descriptor no longer matches
	 * or if our expectation in the expansion case races.
	 *
	 * If we are not expanding or allocating a new descriptor, then reset
	 * the target descriptor to a reserved state so we have a uniform
	 * setup for the next code block.
	 */
	if ((flags & DUP_VARIABLE) || new >= fdp->fd_nfiles) {
		error = fdalloc_locked(p, fdp, new, &newfd);
		if (error) {
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			return (error);
		}
		/*
		 * Check for ripout of the source descriptor
		 */
		if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		/*
		 * Check for expansion race
		 */
		if ((flags & DUP_VARIABLE) == 0 && new != newfd) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		/*
		 * Check for ripout, newfd reused old (this case probably
		 * can't occur).
		 */
		if (old == newfd) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		new = newfd;
		delfp = NULL;
	} else {
		if (fdp->fd_files[new].reserved) {
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			kprintf("Warning: dup(): target descriptor %d is "
				"reserved, waiting for it to be resolved\n",
				new);
			tsleep(fdp, 0, "fdres", hz);
			goto retry;
		}

		/*
		 * If the target descriptor was never allocated we have
		 * to allocate it.  If it was we have to clean out the
		 * old descriptor.  delfp inherits the ref from the
		 * descriptor table.
		 */
		++fdp->fd_closedcounter;
		fclearcache(&fdp->fd_files[new], NULL, 0);
		++fdp->fd_closedcounter;
		delfp = fdp->fd_files[new].fp;
		fdp->fd_files[new].fp = NULL;
		fdp->fd_files[new].reserved = 1;
		if (delfp == NULL) {
			fdreserve_locked(fdp, new, 1);
			if (new > fdp->fd_lastfile)
				fdp->fd_lastfile = new;
		}
	}

	/*
	 * NOTE: still holding an exclusive spinlock
	 */

	/*
	 * If a descriptor is being overwritten we may have to tell
	 * fdfree() to sleep to ensure that all relevant process
	 * leaders can be traversed in closef().
	 */
	if (delfp != NULL && p->p_fdtol != NULL) {
		fdp->fd_holdleaderscount++;
		holdleaders = 1;
	} else {
		holdleaders = 0;
	}
	KASSERT(delfp == NULL || (flags & DUP_FIXED),
		("dup() picked an open file"));

	/*
	 * Duplicate the source descriptor, update lastfile.  If the new
	 * descriptor was not allocated and we aren't replacing an existing
	 * descriptor we have to mark the descriptor as being in use.
	 *
	 * The fd_files[] array inherits fp's hold reference.
	 */
	fsetfd_locked(fdp, fp, new);
	if ((flags & DUP_CLOEXEC) != 0)
		fdp->fd_files[new].fileflags = oldflags | UF_EXCLOSE;
	else
		fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
	spin_unlock(&fdp->fd_spin);
	*res = new;

	/*
	 * If we dup'd over a valid file, we now own the reference to it
	 * and must dispose of it using closef() semantics (as if a
	 * close() were performed on it).
	 */
	if (delfp) {
		if (SLIST_FIRST(&delfp->f_klist))
			knote_fdclose(delfp, fdp, new);
		closef(delfp, p);
		if (holdleaders) {
			spin_lock(&fdp->fd_spin);
			fdp->fd_holdleaderscount--;
			if (fdp->fd_holdleaderscount == 0 &&
			    fdp->fd_holdleaderswakeup != 0) {
				fdp->fd_holdleaderswakeup = 0;
				spin_unlock(&fdp->fd_spin);
				wakeup(&fdp->fd_holdleaderscount);
			} else {
				spin_unlock(&fdp->fd_spin);
			}
		}
	}
	return (0);
}
/*
 * If sigio is on the list associated with a process or process group,
 * disable signalling from the device, remove sigio from the list and
 * free sigio.
 */
void
funsetown(struct sigio **sigiop)
{
	struct pgrp *pgrp;
	struct proc *p;
	struct sigio *sigio;

	if ((sigio = *sigiop) != NULL) {
		lwkt_gettoken(&sigio_token);	/* protect sigio */
		KKASSERT(sigiop == sigio->sio_myref);
		*sigiop = NULL;
		lwkt_reltoken(&sigio_token);
	}
	if (sigio == NULL)
		return;

	if (sigio->sio_pgid < 0) {
		pgrp = sigio->sio_pgrp;
		sigio->sio_pgrp = NULL;
		lwkt_gettoken(&pgrp->pg_token);
		SLIST_REMOVE(&pgrp->pg_sigiolst, sigio, sigio, sio_pgsigio);
		lwkt_reltoken(&pgrp->pg_token);
	} else /* if ((*sigiop)->sio_pgid > 0) */ {
		p = sigio->sio_proc;
		sigio->sio_proc = NULL;
		lwkt_gettoken(&p->p_token);
		SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio);
		lwkt_reltoken(&p->p_token);
	}
	crfree(sigio->sio_ucred);
	sigio->sio_ucred = NULL;
	kfree(sigio, M_SIGIO);
}

/*
 * Free a list of sigio structures.  Caller is responsible for ensuring
 * that the list is MPSAFE.
 */
void
funsetownlst(struct sigiolst *sigiolst)
{
	struct sigio *sigio;

	while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
		funsetown(sigio->sio_myref);
}
/*
 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
 *
 * After permission checking, add a sigio structure to the sigio list for
 * the process or process group.
 */
int
fsetown(pid_t pgid, struct sigio **sigiop)
{
	struct proc *proc = NULL;
	struct pgrp *pgrp = NULL;
	struct sigio *sigio;

	if (pgid == 0) {
		funsetown(sigiop);
		return (0);
	}

	if (pgid > 0) {
		proc = pfind(pgid);
		if (proc == NULL)
			return (ESRCH);

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (proc->p_session != curproc->p_session) {
			return (EPERM);
		}
	} else /* if (pgid < 0) */ {
		pgrp = pgfind(-pgid);
		if (pgrp == NULL)
			return (ESRCH);

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pgrp->pg_session != curproc->p_session) {
			return (EPERM);
		}
	}
	sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK | M_ZERO);
	if (pgid > 0) {
		KKASSERT(pgrp == NULL);
		lwkt_gettoken(&proc->p_token);
		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
		sigio->sio_proc = proc;
		lwkt_reltoken(&proc->p_token);
	} else {
		KKASSERT(proc == NULL);
		lwkt_gettoken(&pgrp->pg_token);
		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
		sigio->sio_pgrp = pgrp;
		lwkt_reltoken(&pgrp->pg_token);
	}
	sigio->sio_pgid = pgid;
	sigio->sio_ucred = crhold(curthread->td_ucred);
	/* It would be convenient if p_ruid was in ucred. */
	sigio->sio_ruid = sigio->sio_ucred->cr_ruid;
	sigio->sio_myref = sigiop;

	lwkt_gettoken(&sigio_token);
	funsetown(sigiop);
	*sigiop = sigio;
	lwkt_reltoken(&sigio_token);

	return (0);
}
/*
 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
 */
pid_t
fgetown(struct sigio **sigiop)
{
	struct sigio *sigio;
	pid_t own;

	lwkt_gettoken_shared(&sigio_token);
	sigio = *sigiop;
	own = (sigio != NULL ? sigio->sio_pgid : 0);
	lwkt_reltoken(&sigio_token);

	return (own);
}
/*
 * Close many file descriptors.
 */
int
sys_closefrom(struct closefrom_args *uap)
{
	return(kern_closefrom(uap->fd));
}

/*
 * Close all file descriptors greater than or equal to fd
 */
int
kern_closefrom(int fd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	int error;
	int e2;

	KKASSERT(p);
	fdp = p->p_fd;

	if (fd < 0)
		return (EINVAL);

	/*
	 * NOTE: This function will skip unassociated descriptors and
	 *	 reserved descriptors that have not yet been assigned.
	 *	 fd_lastfile can change as a side effect of kern_close().
	 *
	 * NOTE: We accumulate EINTR errors and return EINTR if any
	 *	 close() returned EINTR.  However, the descriptor is
	 *	 still closed and we do not break out of the loop.
	 */
	error = 0;
	spin_lock(&fdp->fd_spin);
	while (fd <= fdp->fd_lastfile) {
		if (fdp->fd_files[fd].fp != NULL) {
			spin_unlock(&fdp->fd_spin);
			/* ok if this races another close */
			e2 = kern_close(fd);
			if (e2 == EINTR)
				error = EINTR;
			spin_lock(&fdp->fd_spin);
		}
		++fd;
	}
	spin_unlock(&fdp->fd_spin);

	return (error);
}
/*
 * Close a file descriptor.
 */
int
sys_close(struct close_args *uap)
{
	return(kern_close(uap->fd));
}

int
kern_close(int fd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	struct file *fp;
	int error;
	int holdleaders;

	KKASSERT(p);
	fdp = p->p_fd;

	/*
	 * funsetfd*() also clears the fd cache
	 */
	spin_lock(&fdp->fd_spin);
	if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
		spin_unlock(&fdp->fd_spin);
		return (EBADF);
	}
	holdleaders = 0;
	if (p->p_fdtol != NULL) {
		/*
		 * Ask fdfree() to sleep to ensure that all relevant
		 * process leaders can be traversed in closef().
		 */
		fdp->fd_holdleaderscount++;
		holdleaders = 1;
	}

	/*
	 * we now hold the fp reference that used to be owned by the
	 * descriptor array.
	 */
	spin_unlock(&fdp->fd_spin);
	if (SLIST_FIRST(&fp->f_klist))
		knote_fdclose(fp, fdp, fd);
	error = closef(fp, p);
	if (holdleaders) {
		spin_lock(&fdp->fd_spin);
		fdp->fd_holdleaderscount--;
		if (fdp->fd_holdleaderscount == 0 &&
		    fdp->fd_holdleaderswakeup != 0) {
			fdp->fd_holdleaderswakeup = 0;
			spin_unlock(&fdp->fd_spin);
			wakeup(&fdp->fd_holdleaderscount);
		} else {
			spin_unlock(&fdp->fd_spin);
		}
	}
	return (error);
}
/*
 * shutdown_args(int fd, int how)
 */
int
kern_shutdown(int fd, int how)
{
	struct thread *td = curthread;
	struct file *fp;
	int error;

	if ((fp = holdfp(td, fd, -1)) == NULL)
		return (EBADF);
	error = fo_shutdown(fp, how);
	fdrop(fp);

	return (error);
}

int
sys_shutdown(struct shutdown_args *uap)
{
	int error;

	error = kern_shutdown(uap->s, uap->how);

	return (error);
}
int
kern_fstat(int fd, struct stat *ub)
{
	struct thread *td = curthread;
	struct file *fp;
	int error;

	if ((fp = holdfp(td, fd, -1)) == NULL)
		return (EBADF);
	error = fo_stat(fp, ub, td->td_ucred);
	fdrop(fp);

	return (error);
}

/*
 * Return status information about a file descriptor.
 */
int
sys_fstat(struct fstat_args *uap)
{
	struct stat st;
	int error;

	error = kern_fstat(uap->fd, &st);

	if (error == 0)
		error = copyout(&st, uap->sb, sizeof(st));
	return (error);
}
/*
 * Return pathconf information about a file descriptor.
 */
int
sys_fpathconf(struct fpathconf_args *uap)
{
	struct thread *td = curthread;
	struct file *fp;
	struct vnode *vp;
	int error;

	if ((fp = holdfp(td, uap->fd, -1)) == NULL)
		return (EBADF);

	switch (fp->f_type) {
	case DTYPE_PIPE:
	case DTYPE_SOCKET:
		if (uap->name != _PC_PIPE_BUF) {
			error = EINVAL;
		} else {
			uap->sysmsg_result = PIPE_BUF;
			error = 0;
		}
		break;
	case DTYPE_FIFO:
	case DTYPE_VNODE:
		vp = (struct vnode *)fp->f_data;
		error = VOP_PATHCONF(vp, uap->name, &uap->sysmsg_reg);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	fdrop(fp);
	return (error);
}
/*
 * Grow the file table so it can hold through descriptor (want).
 *
 * The fdp's spinlock must be held exclusively on entry and may be held
 * exclusively on return.  The spinlock may be cycled by the routine.
 */
static void
fdgrow_locked(struct filedesc *fdp, int want)
{
	struct fdnode *newfiles;
	struct fdnode *oldfiles;
	int nf, extra;

	nf = fdp->fd_nfiles;
	do {
		/* nf has to be of the form 2^n - 1 */
		nf = 2 * nf + 1;
	} while (nf <= want);

	spin_unlock(&fdp->fd_spin);
	newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
	spin_lock(&fdp->fd_spin);

	/*
	 * We could have raced another extend while we were not holding
	 * the spinlock.
	 */
	if (fdp->fd_nfiles >= nf) {
		spin_unlock(&fdp->fd_spin);
		kfree(newfiles, M_FILEDESC);
		spin_lock(&fdp->fd_spin);
		return;
	}
	/*
	 * Copy the existing ofile and ofileflags arrays
	 * and zero the new portion of each array.
	 */
	extra = nf - fdp->fd_nfiles;
	bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
	bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));

	oldfiles = fdp->fd_files;
	fdp->fd_files = newfiles;
	fdp->fd_nfiles = nf;

	if (oldfiles != fdp->fd_builtin_files) {
		spin_unlock(&fdp->fd_spin);
		kfree(oldfiles, M_FILEDESC);
		spin_lock(&fdp->fd_spin);
	}
}
/*
 * Number of nodes in right subtree, including the root.
 */
static __inline int
right_subtree_size(int n)
{
	return (n ^ (n | (n + 1)));
}

/*
 * Bigger ancestor.
 */
static __inline int
right_ancestor(int n)
{
	return (n | (n + 1));
}

/*
 * Smaller ancestor.
 */
static __inline int
left_ancestor(int n)
{
	return ((n & (n + 1)) - 1);
}
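/*
 * Worked example of the index arithmetic above (the values follow
 * directly from the expressions): right_ancestor(2) == (2 | 3) == 3,
 * left_ancestor(2) == (2 & 3) - 1 == 1, and
 * right_subtree_size(3) == 3 ^ (3 | 4) == 4.
 */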
/*
 * Traverse the in-place binary tree bottom-up adjusting the allocation
 * count so scans can determine where free descriptors are located.
 *
 * caller must be holding an exclusive spinlock on fdp
 */
static void
fdreserve_locked(struct filedesc *fdp, int fd, int incr)
{
	while (fd >= 0) {
		fdp->fd_files[fd].allocated += incr;
		KKASSERT(fdp->fd_files[fd].allocated >= 0);
		fd = left_ancestor(fd);
	}
}
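/*
 * Example (derived from the walk above): reserving descriptor 5 bumps
 * fd_files[5].allocated and then fd_files[3].allocated, since
 * left_ancestor(5) == 3 and left_ancestor(3) == -1 terminates the loop.
 */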
/*
 * Reserve a file descriptor for the process.  If no error occurs, the
 * caller MUST at some point call fsetfd() or assign a file pointer
 * or dispose of the reservation.
 */
static int
fdalloc_locked(struct proc *p, struct filedesc *fdp, int want, int *result)
{
	struct plimit *limit = readplimits(p);
	struct uidinfo *uip;
	int fd, rsize, rsum, node, lim;

	/*
	 * Check dtable size limit
	 */
	*result = -1;	/* avoid gcc warnings */
	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		lim = INT_MAX;
	else
		lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (lim > maxfilesperproc)
		lim = maxfilesperproc;
	if (lim < minfilesperproc)
		lim = minfilesperproc;

	/*
	 * Check that the user has not run out of descriptors (non-root only).
	 * As a safety measure the dtable is allowed to have at least
	 * minfilesperproc open fds regardless of the maxfilesperuser limit.
	 *
	 * This isn't as loose a spec as ui_posixlocks, so we use atomic
	 * ops to force synchronize and recheck if we would otherwise
	 * blow the limit.
	 */
	if (p->p_ucred->cr_uid && fdp->fd_nfiles >= minfilesperproc) {
		uip = p->p_ucred->cr_uidinfo;
		if (uip->ui_openfiles > maxfilesperuser) {
			int count;
			int n;

			count = 0;
			for (n = 0; n < ncpus; ++n) {
				count += atomic_swap_int(
					    &uip->ui_pcpu[n].pu_openfiles, 0);
			}
			atomic_add_int(&uip->ui_openfiles, count);
			if (uip->ui_openfiles > maxfilesperuser) {
				krateprintf(&krate_uidinfo,
					    "Warning: user %d pid %d (%s) "
					    "ran out of file descriptors "
					    "(%d/%d)\n",
					    p->p_ucred->cr_uid, (int)p->p_pid,
					    p->p_comm,
					    uip->ui_openfiles,
					    maxfilesperuser);
				return (ENFILE);
			}
		}
	}

	/*
	 * Grow the dtable if necessary
	 */
	if (want >= fdp->fd_nfiles)
		fdgrow_locked(fdp, want);

	/*
	 * Search for a free descriptor starting at the higher
	 * of want or fd_freefile.  If that fails, consider
	 * expanding the ofile array.
	 *
	 * NOTE! the 'allocated' field is a cumulative recursive allocation
	 *	 count.  If we happen to see a value of 0 then we can shortcut
	 *	 our search.  Otherwise we run through through the tree going
	 *	 down branches we know have free descriptor(s) until we hit a
	 *	 leaf node.  The leaf node will be free but will not necessarily
	 *	 have an allocated field of 0.
	 */
retry:
	/* move up the tree looking for a subtree with a free node */
	for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
	     fd = right_ancestor(fd)) {
		if (fdp->fd_files[fd].allocated == 0)
			goto found;

		rsize = right_subtree_size(fd);
		if (fdp->fd_files[fd].allocated == rsize)
			continue;	/* right subtree full */

		/*
		 * Free fd is in the right subtree of the tree rooted at fd.
		 * Call that subtree R.  Look for the smallest (leftmost)
		 * subtree of R with an unallocated fd: continue moving
		 * down the left branch until encountering a full left
		 * subtree, then move to the right.
		 */
		for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
			node = fd + rsize;
			rsum += fdp->fd_files[node].allocated;
			if (fdp->fd_files[fd].allocated == rsum + rsize) {
				fd = node;	/* move to the right */
				if (fdp->fd_files[node].allocated == 0)
					goto found;
				rsum = 0;
			}
		}
		goto found;
	}

	/*
	 * No space in current array.  Expand?
	 */
	if (fdp->fd_nfiles >= lim) {
		return (EMFILE);
	}
	fdgrow_locked(fdp, want);
	goto retry;

found:
	KKASSERT(fd < fdp->fd_nfiles);
	if (fd > fdp->fd_lastfile)
		fdp->fd_lastfile = fd;
	if (want <= fdp->fd_freefile)
		fdp->fd_freefile = fd;
	*result = fd;
	KKASSERT(fdp->fd_files[fd].fp == NULL);
	KKASSERT(fdp->fd_files[fd].reserved == 0);
	fdp->fd_files[fd].fileflags = 0;
	fdp->fd_files[fd].reserved = 1;
	fdreserve_locked(fdp, fd, 1);

	return (0);
}
int
fdalloc(struct proc *p, int want, int *result)
{
	struct filedesc *fdp = p->p_fd;
	int error;

	spin_lock(&fdp->fd_spin);
	error = fdalloc_locked(p, fdp, want, result);
	spin_unlock(&fdp->fd_spin);

	return error;
}
/*
 * Check to see whether n user file descriptors
 * are available to the process p.
 */
int
fdavail(struct proc *p, int n)
{
	struct plimit *limit = readplimits(p);
	struct filedesc *fdp = p->p_fd;
	struct fdnode *fdnode;
	int i, lim, last;

	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		lim = INT_MAX;
	else
		lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (lim > maxfilesperproc)
		lim = maxfilesperproc;
	if (lim < minfilesperproc)
		lim = minfilesperproc;

	spin_lock(&fdp->fd_spin);
	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
		spin_unlock(&fdp->fd_spin);
		return (1);
	}
	last = min(fdp->fd_nfiles, lim);
	fdnode = &fdp->fd_files[fdp->fd_freefile];
	for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) {
		if (fdnode->fp == NULL && --n <= 0) {
			spin_unlock(&fdp->fd_spin);
			return (1);
		}
	}
	spin_unlock(&fdp->fd_spin);
	return (0);
}
/*
 * Revoke open descriptors referencing (f_data, f_type)
 *
 * Any revoke executed within a prison is only able to
 * revoke descriptors for processes within that prison.
 *
 * Returns 0 on success or an error code.
 */
struct fdrevoke_info {
	void *data;
	short type;
	int found;
	struct ucred *cred;
	struct file *nfp;
};

static int fdrevoke_check_callback(struct file *fp, void *vinfo);
static int fdrevoke_proc_callback(struct proc *p, void *vinfo);

int
fdrevoke(void *f_data, short f_type, struct ucred *cred)
{
	struct fdrevoke_info info;
	int error;

	bzero(&info, sizeof(info));
	info.data = f_data;
	info.type = f_type;
	info.cred = cred;
	error = falloc(NULL, &info.nfp, NULL);
	if (error)
		return (error);

	/*
	 * Scan the file pointer table once.  dups do not dup file pointers,
	 * only descriptors, so there is no leak.  Set FREVOKED on the fps
	 * found.
	 *
	 * Any fps sent over unix-domain sockets will be revoked by the
	 * socket code checking for FREVOKED when the fps are externalized.
	 * revoke_token is used to make sure that fps marked FREVOKED and
	 * externalized will be picked up by the following allproc_scan().
	 */
	lwkt_gettoken(&revoke_token);
	allfiles_scan_exclusive(fdrevoke_check_callback, &info);
	lwkt_reltoken(&revoke_token);

	/*
	 * If any fps were marked track down the related descriptors
	 * and close them.  Any dup()s at this point will notice
	 * the FREVOKED already set in the fp and do the right thing.
	 */
	if (info.found)
		allproc_scan(fdrevoke_proc_callback, &info, 0);
	fdrop(info.nfp);
	return (0);
}
/*
 * Locate matching file pointers directly.
 *
 * WARNING: allfiles_scan_exclusive() holds a spinlock through these calls!
 */
static int
fdrevoke_check_callback(struct file *fp, void *vinfo)
{
	struct fdrevoke_info *info = vinfo;

	/*
	 * File pointers already flagged for revocation are skipped.
	 */
	if (fp->f_flag & FREVOKED)
		return(0);

	/*
	 * If revoking from a prison file pointers created outside of
	 * that prison, or file pointers without creds, cannot be revoked.
	 */
	if (info->cred->cr_prison &&
	    (fp->f_cred == NULL ||
	     info->cred->cr_prison != fp->f_cred->cr_prison)) {
		return(0);
	}

	/*
	 * If the file pointer matches then mark it for revocation.  The
	 * flag is currently only used by unp_revoke_gc().
	 *
	 * info->found is a heuristic and can race in a SMP environment.
	 */
	if (info->data == fp->f_data && info->type == fp->f_type) {
		atomic_set_int(&fp->f_flag, FREVOKED);
		info->found = 1;
	}
	return(0);
}
/*
 * Locate matching file pointers via process descriptor tables.
 */
static int
fdrevoke_proc_callback(struct proc *p, void *vinfo)
{
	struct fdrevoke_info *info = vinfo;
	struct filedesc *fdp;
	struct file *fp;
	int n;

	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return(0);
	if (info->cred->cr_prison &&
	    info->cred->cr_prison != p->p_ucred->cr_prison) {
		return(0);
	}

	/*
	 * If the controlling terminal of the process matches the
	 * vnode being revoked we clear the controlling terminal.
	 *
	 * The normal spec_close() may not catch this because it
	 * uses curproc instead of p.
	 */
	if (p->p_session && info->type == DTYPE_VNODE &&
	    info->data == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(info->data);
	}

	/*
	 * Softref the fdp to prevent it from being destroyed
	 */
	spin_lock(&p->p_spin);
	if ((fdp = p->p_fd) == NULL) {
		spin_unlock(&p->p_spin);
		return(0);
	}
	atomic_add_int(&fdp->fd_softrefs, 1);
	spin_unlock(&p->p_spin);

	/*
	 * Locate and close any matching file descriptors, replacing
	 * them with info->nfp.
	 */
	spin_lock(&fdp->fd_spin);
	for (n = 0; n < fdp->fd_nfiles; ++n) {
		if ((fp = fdp->fd_files[n].fp) == NULL)
			continue;
		if (fp->f_flag & FREVOKED) {
			++fdp->fd_closedcounter;
			fclearcache(&fdp->fd_files[n], NULL, 0);
			++fdp->fd_closedcounter;
			fhold(info->nfp);
			fdp->fd_files[n].fp = info->nfp;
			spin_unlock(&fdp->fd_spin);
			knote_fdclose(fp, fdp, n);	/* XXX */
			closef(fp, p);
			spin_lock(&fdp->fd_spin);
		}
	}
	spin_unlock(&fdp->fd_spin);
	atomic_subtract_int(&fdp->fd_softrefs, 1);
	return(0);
}
/*
 * Create a new open file structure and reserve a file descriptor
 * for the process that refers to it.
 *
 * Root creds are checked using lp, or assumed if lp is NULL.  If
 * resultfd is non-NULL then lp must also be non-NULL.  No file
 * descriptor is reserved (and no process context is needed) if
 * resultfd is NULL.
 *
 * A file pointer with a refcount of 1 is returned.  Note that the
 * file pointer is NOT associated with the descriptor.  If falloc
 * returns success, fsetfd() MUST be called to either associate the
 * file pointer or clear the reservation.
 */
int
falloc(struct lwp *lp, struct file **resultfp, int *resultfd)
{
	static struct timeval lastfail;
	static int curfail;
	struct filelist_head *head;
	struct file *fp;
	struct ucred *cred = lp ? lp->lwp_thread->td_ucred : proc0.p_ucred;
	int error;

	fp = NULL;

	/*
	 * Handle filetable full issues and root overfill.
	 */
	if (nfiles >= maxfiles - maxfilesrootres &&
	    (cred->cr_ruid != 0 || nfiles >= maxfiles)) {
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			kprintf("kern.maxfiles limit exceeded by uid %d, "
				"please see tuning(7).\n",
				cred->cr_ruid);
		}
		error = ENFILE;
		goto done;
	}

	/*
	 * Allocate a new file descriptor.
	 */
	fp = objcache_get(file_objcache, M_WAITOK);
	bzero(fp, sizeof(*fp));
	spin_init(&fp->f_spin, "falloc");
	SLIST_INIT(&fp->f_klist);
	fp->f_count = 1;
	fp->f_ops = &badfileops;
	fsetcred(fp, cred);
	atomic_add_int(&nfiles, 1);

	head = fp2filelist(fp);
	spin_lock(&head->spin);
	LIST_INSERT_HEAD(&head->list, fp, f_list);
	spin_unlock(&head->spin);

	if (resultfd) {
		if ((error = fdalloc(lp->lwp_proc, 0, resultfd)) != 0) {
			fdrop(fp);
			fp = NULL;
		}
	} else {
		error = 0;
	}
done:
	*resultfp = fp;
	return (error);
}
/*
 * Check for races against a file descriptor by determining that the
 * file pointer is still associated with the specified file descriptor,
 * and a close is not currently in progress.
 */
int
checkfdclosed(thread_t td, struct filedesc *fdp, int fd, struct file *fp,
	      int closedcounter)
{
	struct fdcache *fdc;
	int error;

	if (fdp->fd_closedcounter == closedcounter)
		return 0;

	if (td->td_proc && td->td_proc->p_fd == fdp) {
		for (fdc = &td->td_fdcache[0];
		     fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
			if (fdc->fd == fd && fdc->fp == fp)
				return 0;
		}
	}

	spin_lock_shared(&fdp->fd_spin);
	if ((unsigned)fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp)
		error = EBADF;
	else
		error = 0;
	spin_unlock_shared(&fdp->fd_spin);
	return (error);
}
/*
 * Associate a file pointer with a previously reserved file descriptor.
 * This function always succeeds.
 *
 * If fp is NULL, the file descriptor is returned to the pool.
 *
 * Caller must hold an exclusive spinlock on fdp->fd_spin.
 */
static void
fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd)
{
	KKASSERT((unsigned)fd < fdp->fd_nfiles);
	KKASSERT(fdp->fd_files[fd].reserved != 0);
	if (fp) {
		fhold(fp);
		/* fclearcache(&fdp->fd_files[fd], NULL, 0); */
		fdp->fd_files[fd].fp = fp;
		fdp->fd_files[fd].reserved = 0;
	} else {
		fdp->fd_files[fd].reserved = 0;
		fdreserve_locked(fdp, fd, -1);
		fdfixup_locked(fdp, fd);
	}
}
/*
 * Caller must hold an exclusive spinlock on fdp->fd_spin.
 */
void
fsetfd(struct filedesc *fdp, struct file *fp, int fd)
{
	spin_lock(&fdp->fd_spin);
	fsetfd_locked(fdp, fp, fd);
	spin_unlock(&fdp->fd_spin);
}
/*
 * Caller must hold an exclusive spinlock on fdp->fd_spin.
 */
static struct file *
funsetfd_locked(struct filedesc *fdp, int fd)
{
	struct file *fp;

	if ((unsigned)fd >= fdp->fd_nfiles)
		return (NULL);
	if ((fp = fdp->fd_files[fd].fp) == NULL)
		return (NULL);
	++fdp->fd_closedcounter;
	fclearcache(&fdp->fd_files[fd], NULL, 0);
	fdp->fd_files[fd].fp = NULL;
	fdp->fd_files[fd].fileflags = 0;
	++fdp->fd_closedcounter;

	fdreserve_locked(fdp, fd, -1);
	fdfixup_locked(fdp, fd);

	return (fp);
}
/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
{
	int error;

	spin_lock_shared(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		*flagsp = fdp->fd_files[fd].fileflags;
		error = 0;
	}
	spin_unlock_shared(&fdp->fd_spin);

	return (error);
}
/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
{
	int error;

	spin_lock(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		fdp->fd_files[fd].fileflags |= add_flags;
		error = 0;
	}
	spin_unlock(&fdp->fd_spin);

	return (error);
}
/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
{
	int error;

	spin_lock(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		fdp->fd_files[fd].fileflags &= ~rem_flags;
		error = 0;
	}
	spin_unlock(&fdp->fd_spin);

	return (error);
}
/*
 * Set/Change/Clear the creds for a fp and synchronize the uidinfo.
 */
void
fsetcred(struct file *fp, struct ucred *ncr)
{
	struct ucred *ocr;
	struct uidinfo *uip;
	struct uidcount *pup;
	int cpu = mycpuid;
	int count;

	ocr = fp->f_cred;
	if (ocr == NULL || ncr == NULL || ocr->cr_uidinfo != ncr->cr_uidinfo) {
		if (ocr) {
			uip = ocr->cr_uidinfo;
			pup = &uip->ui_pcpu[cpu];
			atomic_add_int(&pup->pu_openfiles, -1);
			if (pup->pu_openfiles < -PUP_LIMIT ||
			    pup->pu_openfiles > PUP_LIMIT) {
				count = atomic_swap_int(&pup->pu_openfiles, 0);
				atomic_add_int(&uip->ui_openfiles, count);
			}
		}
		if (ncr) {
			uip = ncr->cr_uidinfo;
			pup = &uip->ui_pcpu[cpu];
			atomic_add_int(&pup->pu_openfiles, 1);
			if (pup->pu_openfiles < -PUP_LIMIT ||
			    pup->pu_openfiles > PUP_LIMIT) {
				count = atomic_swap_int(&pup->pu_openfiles, 0);
				atomic_add_int(&uip->ui_openfiles, count);
			}
		}
	}
	if (ncr)
		crhold(ncr);
	if (ocr)
		crfree(ocr);
	fp->f_cred = ncr;
}
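/*
 * Note (descriptive only): the per-cpu pu_openfiles deltas maintained
 * above are folded into uip->ui_openfiles whenever a per-cpu counter
 * drifts beyond +/-PUP_LIMIT, keeping the global per-user count
 * approximately synchronized without taking a lock on every open/close.
 */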
/*
 * Free a file descriptor.
 */
static
void
ffree(struct file *fp)
{
	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
	if (fp->f_nchandle.ncp)
		cache_drop(&fp->f_nchandle);
	objcache_put(file_objcache, fp);
}
/*
 * called from init_main, initialize filedesc0 for proc0.
 */
void
fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask)
{
	p0->p_fd = fdp0;
	p0->p_fdtol = NULL;
	fdp0->fd_refcnt = 1;
	fdp0->fd_cmask = cmask;
	fdp0->fd_files = fdp0->fd_builtin_files;
	fdp0->fd_nfiles = NDFILE;
	fdp0->fd_lastfile = -1;
	spin_init(&fdp0->fd_spin, "fdinitbootstrap");
}
/*
 * Build a new filedesc structure.
 */
struct filedesc *
fdinit(struct proc *p)
{
	struct filedesc *newfdp;
	struct filedesc *fdp = p->p_fd;

	newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO);
	spin_lock(&fdp->fd_spin);
	if (fdp->fd_cdir) {
		newfdp->fd_cdir = fdp->fd_cdir;
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
	}

	/*
	 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of
	 * proc0, but should unconditionally exist in other processes.
	 */
	if (fdp->fd_rdir) {
		newfdp->fd_rdir = fdp->fd_rdir;
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	}
	if (fdp->fd_jdir) {
		newfdp->fd_jdir = fdp->fd_jdir;
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	}
	spin_unlock(&fdp->fd_spin);

	/* Create the file descriptor table. */
	newfdp->fd_refcnt = 1;
	newfdp->fd_cmask = cmask;
	newfdp->fd_files = newfdp->fd_builtin_files;
	newfdp->fd_nfiles = NDFILE;
	newfdp->fd_lastfile = -1;
	spin_init(&newfdp->fd_spin, "fdinit");

	return (newfdp);
}
/*
 * Share a filedesc structure.
 */
struct filedesc *
fdshare(struct proc *p)
{
	struct filedesc *fdp;

	fdp = p->p_fd;
	spin_lock(&fdp->fd_spin);
	fdp->fd_refcnt++;
	spin_unlock(&fdp->fd_spin);
	return (fdp);
}
/*
 * Copy a filedesc structure.
 */
int
fdcopy(struct proc *p, struct filedesc **fpp)
{
	struct filedesc *fdp = p->p_fd;
	struct filedesc *newfdp;
	struct fdnode *fdnode;
	int i;
	int ni;

	/*
	 * Certain daemons might not have file descriptors.
	 */
	if (fdp == NULL) {
		*fpp = NULL;
		return (0);
	}

	/*
	 * Allocate the new filedesc and fd_files[] array.  This can race
	 * with operations by other threads on the fdp so we have to be
	 * careful.
	 */
	newfdp = kmalloc(sizeof(struct filedesc),
			 M_FILEDESC, M_WAITOK | M_ZERO | M_NULLOK);
	if (newfdp == NULL) {
		*fpp = NULL;
		return (ENOMEM);
	}
again:
	spin_lock(&fdp->fd_spin);
	if (fdp->fd_lastfile < NDFILE) {
		newfdp->fd_files = newfdp->fd_builtin_files;
		i = NDFILE;
	} else {
		/*
		 * We have to allocate (2^n - 1) entries for our in-place
		 * binary tree.  Allow the table to shrink.
		 */
		i = fdp->fd_nfiles;
		ni = (i - 1) / 2;
		while (ni > fdp->fd_lastfile && ni > NDFILE) {
			i = ni;
			ni = (i - 1) / 2;
		}
		spin_unlock(&fdp->fd_spin);
		newfdp->fd_files = kmalloc(i * sizeof(struct fdnode),
					   M_FILEDESC, M_WAITOK | M_ZERO);

		/*
		 * Check for race, retry
		 */
		spin_lock(&fdp->fd_spin);
		if (i <= fdp->fd_lastfile) {
			spin_unlock(&fdp->fd_spin);
			kfree(newfdp->fd_files, M_FILEDESC);
			goto again;
		}
	}

	/*
	 * Dup the remaining fields. vref() and cache_hold() can be
	 * safely called while holding the read spinlock on fdp.
	 *
	 * The read spinlock on fdp is still being held.
	 *
	 * NOTE: vref and cache_hold calls for the case where the vnode
	 * or cache entry already has at least one ref may be called
	 * while holding spin locks.
	 */
	if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) {
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
	}
	/*
	 * We must check for fd_rdir here, at least for now because
	 * the init process is created before we have access to the
	 * rootvode to take a reference to it.
	 */
	if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) {
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	}
	if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) {
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	}
	newfdp->fd_refcnt = 1;
	newfdp->fd_nfiles = i;
	newfdp->fd_lastfile = fdp->fd_lastfile;
	newfdp->fd_freefile = fdp->fd_freefile;
	newfdp->fd_cmask = fdp->fd_cmask;
	spin_init(&newfdp->fd_spin, "fdcopy");

	/*
	 * Copy the descriptor table through (i).  This also copies the
	 * allocation state.   Then go through and ref the file pointers
	 * and clean up any KQ descriptors.
	 *
	 * kq descriptors cannot be copied.  Since we haven't ref'd the
	 * copied files yet we can ignore the return value from funsetfd().
	 *
	 * The read spinlock on fdp is still being held.
	 *
	 * Be sure to clean out fdnode->tdcache, otherwise bad things will
	 * happen.
	 */
	bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode));
	for (i = 0 ; i < newfdp->fd_nfiles; ++i) {
		fdnode = &newfdp->fd_files[i];
		if (fdnode->reserved) {
			fdreserve_locked(newfdp, i, -1);
			fdnode->reserved = 0;
			fdfixup_locked(newfdp, i);
		} else if (fdnode->fp) {
			bzero(&fdnode->tdcache, sizeof(fdnode->tdcache));
			if (fdnode->fp->f_type == DTYPE_KQUEUE) {
				(void)funsetfd_locked(newfdp, i);
			} else {
				fhold(fdnode->fp);
			}
		}
	}
	spin_unlock(&fdp->fd_spin);
	*fpp = newfdp;
	return (0);
}
/*
 * Release a filedesc structure.
 *
 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE)
 */
void
fdfree(struct proc *p, struct filedesc *repl)
{
	struct filedesc *fdp;
	struct fdnode *fdnode;
	struct filedesc_to_leader *fdtol;
	struct file *fp;
	struct vnode *vp;
	struct flock lf;
	int i;

	/*
	 * Before destroying or replacing p->p_fd we must be sure to
	 * clean out the cache of the last thread, which should be
	 * curthread.
	 */
	fexitcache(curthread);

	/*
	 * Certain daemons might not have file descriptors.
	 */
	fdp = p->p_fd;
	if (fdp == NULL) {
		p->p_fd = repl;
		return;
	}

	/*
	 * Severe messing around to follow.
	 */
	spin_lock(&fdp->fd_spin);

	/* Check for special need to clear POSIX style locks */
	fdtol = p->p_fdtol;
	if (fdtol != NULL) {
		KASSERT(fdtol->fdl_refcount > 0,
			("filedesc_to_refcount botch: fdl_refcount=%d",
			 fdtol->fdl_refcount));
		if (fdtol->fdl_refcount == 1 && p->p_leader->p_advlock_flag) {
			for (i = 0; i <= fdp->fd_lastfile; ++i) {
				fdnode = &fdp->fd_files[i];
				if (fdnode->fp == NULL ||
				    fdnode->fp->f_type != DTYPE_VNODE) {
					continue;
				}
				fp = fdnode->fp;
				fhold(fp);
				spin_unlock(&fdp->fd_spin);

				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = (struct vnode *)fp->f_data;
				VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
					    F_UNLCK, &lf, F_POSIX);
				fdrop(fp);
				spin_lock(&fdp->fd_spin);
			}
		}
retry:
		if (fdtol->fdl_refcount == 1) {
			if (fdp->fd_holdleaderscount > 0 &&
			    p->p_leader->p_advlock_flag) {
				/*
				 * close() or do_dup() has cleared a reference
				 * in a shared file descriptor table.
				 */
				fdp->fd_holdleaderswakeup = 1;
				ssleep(&fdp->fd_holdleaderscount,
				       &fdp->fd_spin, 0, "fdlhold", 0);
				goto retry;
			}
			if (fdtol->fdl_holdcount > 0) {
				/*
				 * Ensure that fdtol->fdl_leader
				 * remains valid in closef().
				 */
				fdtol->fdl_wakeup = 1;
				ssleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0);
				goto retry;
			}
		}
		fdtol->fdl_refcount--;
		if (fdtol->fdl_refcount == 0 &&
		    fdtol->fdl_holdcount == 0) {
			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
		} else {
			fdtol = NULL;
		}
		p->p_fdtol = NULL;
		if (fdtol != NULL) {
			spin_unlock(&fdp->fd_spin);
			kfree(fdtol, M_FILEDESC_TO_LEADER);
			spin_lock(&fdp->fd_spin);
		}
	}
	if (--fdp->fd_refcnt > 0) {
		spin_unlock(&fdp->fd_spin);
		spin_lock(&p->p_spin);
		p->p_fd = repl;
		spin_unlock(&p->p_spin);
		return;
	}

	/*
	 * Even though we are the last reference to the structure allproc
	 * scans may still reference the structure.  Maintain proper
	 * locks until we can replace p->p_fd.
	 *
	 * Also note that kqueue's closef still needs to reference the
	 * fdp via p->p_fd, so we have to close the descriptors before
	 * we replace p->p_fd.
	 */
	for (i = 0; i <= fdp->fd_lastfile; ++i) {
		if (fdp->fd_files[i].fp) {
			fp = funsetfd_locked(fdp, i);
			if (fp) {
				spin_unlock(&fdp->fd_spin);
				if (SLIST_FIRST(&fp->f_klist))
					knote_fdclose(fp, fdp, i);
				closef(fp, p);
				spin_lock(&fdp->fd_spin);
			}
		}
	}
	spin_unlock(&fdp->fd_spin);

	/*
	 * Interlock against an allproc scan operations (typically frevoke).
	 */
	spin_lock(&p->p_spin);
	p->p_fd = repl;
	spin_unlock(&p->p_spin);

	/*
	 * Wait for any softrefs to go away.  This race rarely occurs so
	 * we can use a non-critical-path style poll/sleep loop.  The
	 * race only occurs against allproc scans.
	 *
	 * No new softrefs can occur with the fdp disconnected from the
	 * process.
	 */
	if (fdp->fd_softrefs) {
		kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid);
		while (fdp->fd_softrefs)
			tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1);
	}

	if (fdp->fd_files != fdp->fd_builtin_files)
		kfree(fdp->fd_files, M_FILEDESC);
	if (fdp->fd_cdir) {
		cache_drop(&fdp->fd_ncdir);
		vrele(fdp->fd_cdir);
	}
	if (fdp->fd_rdir) {
		cache_drop(&fdp->fd_nrdir);
		vrele(fdp->fd_rdir);
	}
	if (fdp->fd_jdir) {
		cache_drop(&fdp->fd_njdir);
		vrele(fdp->fd_jdir);
	}
	kfree(fdp, M_FILEDESC);
}
/*
 * Retrieve and reference the file pointer associated with a descriptor.
 *
 * td must be the current thread.
 */
struct file *
holdfp(thread_t td, int fd, int flag)
{
	struct file *fp;

	fp = _holdfp_cache(td, fd);
	if (fp) {
		if ((fp->f_flag & flag) == 0 && flag != -1) {
			fdrop(fp);
			fp = NULL;
		}
	}
	return fp;
}
/*
 * holdsock() - load the struct file pointer associated
 * with a socket into *fpp.  If an error occurs, non-zero
 * will be returned and *fpp will be set to NULL.
 *
 * td must be the current thread.
 */
int
holdsock(thread_t td, int fd, struct file **fpp)
{
	struct file *fp;
	int error;

	fp = _holdfp_cache(td, fd);
	if (fp) {
		if (fp->f_type != DTYPE_SOCKET) {
			error = ENOTSOCK;
			fdrop(fp);
			fp = NULL;
		} else {
			error = 0;
		}
	} else {
		error = EBADF;
	}
	*fpp = fp;
	return (error);
}
2692 * Convert a user file descriptor to a held file pointer.
2694 * td must be the current thread.
2697 holdvnode(thread_t td
, int fd
, struct file
**fpp
)
2702 fp
= _holdfp_cache(td
, fd
);
2704 if (fp
->f_type
!= DTYPE_VNODE
&& fp
->f_type
!= DTYPE_FIFO
) {
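
/*
 * Illustrative sketch only (not compiled): the usual pattern for the
 * hold*() helpers above is hold, use, then release with fdrop().  The
 * function name example_fd_check() and its EBADF/ENOTTY returns are
 * hypothetical; only holdfp()/fdrop() and DTYPE_VNODE come from this file.
 */
#if 0
static int
example_fd_check(thread_t td, int fd)
{
        struct file *fp;
        int error;

        if ((fp = holdfp(td, fd, FREAD)) == NULL)      /* also checks f_flag */
                return (EBADF);
        if (fp->f_type != DTYPE_VNODE)
                error = ENOTTY;
        else
                error = 0;                             /* operate on fp here */
        fdrop(fp);                                     /* release the hold */
        return (error);
}
#endif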
/*
 * For setugid programs, we don't want people to use that setugidness
 * to generate error messages which write to a file which would
 * otherwise be off-limits to the process.
 *
 * This is a gross hack to plug the hole.  A better solution would involve
 * a special vop or other form of generalized access control mechanism.  We
 * go ahead and just reject all procfs filesystem accesses as dangerous.
 *
 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
 * sufficient.  We also don't check for setugidness since we know we are.
 */
is_unsafe(struct file *fp)
{
        if (fp->f_type == DTYPE_VNODE &&
            ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
/*
 * Make this setugid thing safe, if at all possible.
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
setugidsafety(struct proc *p)
{
        struct filedesc *fdp = p->p_fd;

        /* Certain daemons might not have file descriptors. */

        /*
         * note: fdp->fd_files may be reallocated out from under us while
         * we are blocked in a close.  Be careful!
         */
        for (i = 0; i <= fdp->fd_lastfile; i++) {
                if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) {
                        /*
                         * NULL-out descriptor prior to close to avoid
                         * a race while close blocks.
                         */
                        if ((fp = funsetfd_locked(fdp, i)) != NULL) {
                                knote_fdclose(fp, fdp, i);
/*
 * Close all CLOEXEC files on exec.
 *
 * Only a single thread remains for the current process.
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
fdcloseexec(struct proc *p)
{
        struct filedesc *fdp = p->p_fd;

        /* Certain daemons might not have file descriptors. */

        /*
         * We cannot cache fd_files since operations may block and rip
         * them out from under us.
         */
        for (i = 0; i <= fdp->fd_lastfile; i++) {
                if (fdp->fd_files[i].fp != NULL &&
                    (fdp->fd_files[i].fileflags & UF_EXCLOSE)) {
                        /*
                         * NULL-out descriptor prior to close to avoid
                         * a race while close blocks.
                         *
                         * (funsetfd*() also clears the fd cache)
                         */
                        if ((fp = funsetfd_locked(fdp, i)) != NULL) {
                                knote_fdclose(fp, fdp, i);
/*
 * It is unsafe for set[ug]id processes to be started with file
 * descriptors 0..2 closed, as these descriptors are given implicit
 * significance in the Standard C library.  fdcheckstd() will create a
 * descriptor referencing /dev/null for each of stdin, stdout, and
 * stderr that is not already open.
 *
 * NOT MPSAFE - calls falloc, vn_open, etc
 */
fdcheckstd(struct lwp *lp)
{
        struct nlookupdata nd;
        struct filedesc *fdp;
        int i, error, flags, devnull;

        fdp = lp->lwp_proc->p_fd;
        for (i = 0; i < 3; i++) {
                if (fdp->fd_files[i].fp != NULL)
                        continue;
                if ((error = falloc(lp, &fp, &devnull)) != 0)
                        break;
                error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
                                     NLC_FOLLOW|NLC_LOCKVP);
                flags = FREAD | FWRITE;
                error = vn_open(&nd, fp, flags, 0);
                if (error == 0)
                        fsetfd(fdp, fp, devnull);
                else
                        fsetfd(fdp, NULL, devnull);
                KKASSERT(i == devnull);
                error = kern_dup(DUP_FIXED, devnull, i, &retval);
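
/*
 * Userland illustration only (not part of this file, not compiled):
 * fdcheckstd() gives set[ug]id programs the same guarantee a careful
 * daemon arranges for itself below, i.e. descriptors 0..2 always refer
 * to something harmless.  The helper name and minimal error handling
 * are assumptions for illustration.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void
ensure_std_fds(void)
{
        int fd;

        /* open() returns the lowest free descriptor, so this fills 0..2 */
        while ((fd = open("/dev/null", O_RDWR)) >= 0 && fd <= 2)
                ;
        if (fd > 2)
                close(fd);      /* extra descriptor above 2 is not needed */
}
#endif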
/*
 * Internal form of close.
 * Decrement reference count on file structure.
 * Note: td and/or p may be NULL when closing a file
 * that was being passed in a message.
 *
 * MPALMOSTSAFE - acquires mplock for VOP operations
 */
closef(struct file *fp, struct proc *p)
{
        struct filedesc_to_leader *fdtol;

        /*
         * POSIX record locking dictates that any close releases ALL
         * locks owned by this process.  This is handled by setting
         * a flag in the unlock to free ONLY locks obeying POSIX
         * semantics, and not to free BSD-style file locks.
         * If the descriptor was in a message, POSIX-style locks
         * aren't passed with the descriptor.
         */
        if (p != NULL && fp->f_type == DTYPE_VNODE &&
            (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
        ) {
                if (p->p_leader->p_advlock_flag) {
                        lf.l_whence = SEEK_SET;
                        lf.l_type = F_UNLCK;
                        vp = (struct vnode *)fp->f_data;
                        VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
                                    &lf, F_POSIX);
                }
                if (fdtol != NULL) {
                        lwkt_gettoken(&p->p_token);
                        /*
                         * Handle special case where file descriptor table
                         * is shared between multiple process leaders.
                         */
                        for (fdtol = fdtol->fdl_next;
                             fdtol != p->p_fdtol;
                             fdtol = fdtol->fdl_next) {
                                if (fdtol->fdl_leader->p_advlock_flag == 0)
                                        continue;
                                fdtol->fdl_holdcount++;
                                lf.l_whence = SEEK_SET;
                                lf.l_type = F_UNLCK;
                                vp = (struct vnode *)fp->f_data;
                                VOP_ADVLOCK(vp, (caddr_t)fdtol->fdl_leader,
                                            F_UNLCK, &lf, F_POSIX);
                                fdtol->fdl_holdcount--;
                                if (fdtol->fdl_holdcount == 0 &&
                                    fdtol->fdl_wakeup != 0) {
                                        fdtol->fdl_wakeup = 0;
                                }
                        }
                        lwkt_reltoken(&p->p_token);
                }
        }
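
/*
 * Userland illustration only (not compiled): the POSIX semantics described
 * in the comment above mean that closing ANY descriptor for a file drops
 * the process's record locks on that file, even locks taken through a
 * different descriptor.  The helper name below is hypothetical.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void
posix_lock_close_gotcha(const char *path)
{
        struct flock lf;
        int fd1, fd2;

        fd1 = open(path, O_RDWR);
        fd2 = open(path, O_RDWR);       /* second descriptor, same file */
        if (fd1 < 0 || fd2 < 0)
                return;

        lf.l_whence = SEEK_SET;
        lf.l_start = 0;
        lf.l_len = 0;
        lf.l_type = F_WRLCK;
        fcntl(fd1, F_SETLK, &lf);       /* lock the whole file via fd1 */

        close(fd2);                     /* releases the lock too (POSIX) */
        close(fd1);
}
#endif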
/*
 * fhold() can only be called if f_count is already at least 1 (i.e. the
 * caller of fhold() already has a reference to the file pointer in some
 * manner).
 *
 * Atomic ops are used for incrementing and decrementing f_count before
 * the 1->0 transition.  The f_count 1->0 transition is special, see the
 * comment in fdrop().
 */
fhold(struct file *fp)
{
        /* 0->1 transition will never work */
        KASSERT(fp->f_count > 0, ("fhold: invalid f_count %d", fp->f_count));
        atomic_add_int(&fp->f_count, 1);
}

/*
 * fdrop() - drop a reference to a descriptor
 */
fdrop(struct file *fp)
{
        int error, do_free = 0;

        /*
         * A simple atomic_fetchadd_int(f_count, -1) here will cause a use-
         * after-free or double free (due to the f_count 0->1 transition) if
         * fhold() is called on fps found through filehead iteration.
         */
        for (;;) {
                int count = fp->f_count;

                KASSERT(count > 0, ("fdrop: invalid f_count %d", count));
                if (count == 1) {
                        struct filelist_head *head = fp2filelist(fp);

                        /*
                         * About to drop the last reference, hold the
                         * filehead spin lock and drop it, so that no
                         * one could see this fp through filehead anymore,
                         * let alone fhold() this fp.
                         */
                        spin_lock(&head->spin);
                        if (atomic_cmpset_int(&fp->f_count, count, 0)) {
                                LIST_REMOVE(fp, f_list);
                                spin_unlock(&head->spin);
                                atomic_subtract_int(&nfiles, 1);
                                do_free = 1;    /* free this fp */
                                break;
                        }
                        spin_unlock(&head->spin);
                } else if (atomic_cmpset_int(&fp->f_count, count, count - 1)) {
                        break;
                }
        }
        if (do_free == 0)
                return (0);

        KKASSERT(SLIST_FIRST(&fp->f_klist) == NULL);

        /*
         * The last reference has gone away, we own the fp structure free
         * and clear.
         */
        if (fp->f_count < 0)
                panic("fdrop: count < 0");
        if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE &&
            (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
        ) {
                lf.l_whence = SEEK_SET;
                lf.l_type = F_UNLCK;
                vp = (struct vnode *)fp->f_data;
                VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
        }
        if (fp->f_ops != &badfileops)
                error = fo_close(fp);
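
/*
 * Illustrative sketch only (not part of this file, not compiled): the same
 * compare-and-swap idea fdrop() uses, expressed with C11 atomics and a
 * pthread mutex standing in for the filelist head spinlock.  The struct
 * and function names below are hypothetical.
 */
#if 0
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
        atomic_int      refs;           /* analogous to f_count */
};

static pthread_mutex_t obj_list_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Drop one reference.  Returns true when the caller performed the 1->0
 * transition and now owns the object; that transition is done while the
 * list lock is held so no other thread can look the object up and
 * "resurrect" it (the 0->1 race fdrop() guards against).
 */
static bool
obj_drop(struct obj *o)
{
        int count;

        for (;;) {
                count = atomic_load(&o->refs);
                if (count == 1) {
                        pthread_mutex_lock(&obj_list_lock);
                        if (atomic_compare_exchange_strong(&o->refs,
                            &count, 0)) {
                                /* ... unlink o from the lookup list ... */
                                pthread_mutex_unlock(&obj_list_lock);
                                return (true);
                        }
                        pthread_mutex_unlock(&obj_list_lock);
                } else if (atomic_compare_exchange_strong(&o->refs,
                    &count, count - 1)) {
                        return (false);
                }
                /* lost a race with another hold/drop; retry */
        }
}
#endif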
/*
 * Apply an advisory lock on a file descriptor.
 *
 * Just attempt to get a record lock of the requested type on
 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
 */
sys_flock(struct flock_args *uap)
{
        thread_t td = curthread;

        if ((fp = holdfp(td, uap->fd, -1)) == NULL)
                return (EBADF);
        if (fp->f_type != DTYPE_VNODE) {
        vp = (struct vnode *)fp->f_data;
        lf.l_whence = SEEK_SET;
        if (uap->how & LOCK_UN) {
                lf.l_type = F_UNLCK;
                atomic_clear_int(&fp->f_flag, FHASLOCK); /* race ok */
                error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
        }
        if (uap->how & LOCK_EX)
                lf.l_type = F_WRLCK;
        else if (uap->how & LOCK_SH)
                lf.l_type = F_RDLCK;
        if (uap->how & LOCK_NB)
                error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0);
        else
                error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT);
        atomic_set_int(&fp->f_flag, FHASLOCK); /* race ok */
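
/*
 * Userland illustration only (not compiled): typical flock(2) usage that
 * exercises the paths above (LOCK_EX|LOCK_NB maps to a non-waiting F_SETLK,
 * LOCK_UN clears FHASLOCK).  The helper name is hypothetical.
 */
#if 0
#include <sys/file.h>
#include <fcntl.h>
#include <unistd.h>

static int
try_exclusive_lock(const char *path)
{
        int fd;

        if ((fd = open(path, O_RDWR)) < 0)
                return (-1);
        if (flock(fd, LOCK_EX | LOCK_NB) < 0) {
                close(fd);
                return (-1);            /* someone else holds the lock */
        }
        /* ... critical section ... */
        flock(fd, LOCK_UN);
        close(fd);
        return (0);
}
#endif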
/*
 * File Descriptor pseudo-device driver (/dev/fd/).
 *
 * Opening minor device N dup()s the file (if any) connected to file
 * descriptor N belonging to the calling process.  Note that this driver
 * consists of only the ``open()'' routine, because all subsequent
 * references to this file will be direct to the other driver.
 */
fdopen(struct dev_open_args *ap)
{
        thread_t td = curthread;

        KKASSERT(td->td_lwp != NULL);

        /*
         * XXX Kludge: set curlwp->lwp_dupfd to contain the value of the
         * file descriptor being sought for duplication.  The error
         * return ensures that the vnode for this device will be released
         * by vn_open.  Open will detect this special error and take the
         * actions in dupfdopen below.  Other callers of vn_open or VOP_OPEN
         * will simply report the error.
         */
        td->td_lwp->lwp_dupfd = minor(ap->a_head.a_dev);
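
/*
 * Userland illustration only (not compiled): opening /dev/fd/N behaves
 * roughly like dup(N), provided the requested open mode is a subset of the
 * existing descriptor's mode (see the check in dupfdopen() below).  The
 * helper name is hypothetical.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int
reopen_stdin(void)
{
        /* roughly equivalent to dup(STDIN_FILENO) */
        return (open("/dev/fd/0", O_RDONLY));
}
#endif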
/*
 * The caller has reserved the file descriptor dfd for us.  On success we
 * must fsetfd() it.  On failure the caller will clean it up.
 */
dupfdopen(thread_t td, int dfd, int sfd, int mode, int error)
{
        struct filedesc *fdp;

        if ((wfp = holdfp(td, sfd, -1)) == NULL)
                return (EBADF);

        /*
         * Close a revoke/dup race.  Duping a descriptor marked as revoked
         * will dup a dummy descriptor instead of the real one.
         */
        if (wfp->f_flag & FREVOKED) {
                kprintf("Warning: attempt to dup() a revoked descriptor\n");
                werror = falloc(NULL, &wfp, NULL);
        }
        fdp = td->td_proc->p_fd;

        /*
         * There are two cases of interest here.
         *
         * For ENODEV simply dup sfd to file descriptor dfd and return.
         *
         * For ENXIO steal away the file structure from sfd and store it
         * in dfd.  sfd is effectively closed by this operation.
         *
         * Any other error code is just returned.
         */
                /*
                 * Check that the mode the file is being opened for is a
                 * subset of the mode of the existing descriptor.
                 */
                if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
                spin_lock(&fdp->fd_spin);
                fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
                fsetfd_locked(fdp, wfp, dfd);
                spin_unlock(&fdp->fd_spin);

                /*
                 * Steal away the file pointer from sfd and stuff it into dfd.
                 */
                spin_lock(&fdp->fd_spin);
                fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
                fsetfd(fdp, wfp, dfd);
                if ((xfp = funsetfd_locked(fdp, sfd)) != NULL) {
                        spin_unlock(&fdp->fd_spin);
                } else {
                        spin_unlock(&fdp->fd_spin);
                }
/*
 * NOT MPSAFE - I think these refer to a common file descriptor table
 * and we need to spinlock that to link fdtol in.
 */
struct filedesc_to_leader *
filedesc_to_leader_alloc(struct filedesc_to_leader *old,
                         struct proc *leader)
{
        struct filedesc_to_leader *fdtol;

        fdtol = kmalloc(sizeof(struct filedesc_to_leader),
                        M_FILEDESC_TO_LEADER, M_WAITOK | M_ZERO);
        fdtol->fdl_refcount = 1;
        fdtol->fdl_holdcount = 0;
        fdtol->fdl_wakeup = 0;
        fdtol->fdl_leader = leader;
        if (old != NULL) {
                fdtol->fdl_next = old->fdl_next;
                fdtol->fdl_prev = old;
                old->fdl_next = fdtol;
                fdtol->fdl_next->fdl_prev = fdtol;
        } else {
                fdtol->fdl_next = fdtol;
                fdtol->fdl_prev = fdtol;
        }
        return (fdtol);
}
/*
 * Scan all file pointers in the system.  The callback is made with
 * the master list spinlock held exclusively.
 */
allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
{
        for (i = 0; i < NFILELIST_HEADS; ++i) {
                struct filelist_head *head = &filelist_heads[i];

                spin_lock(&head->spin);
                LIST_FOREACH(fp, &head->list, f_list) {
                        res = callback(fp, data);
                }
                spin_unlock(&head->spin);
        }
}
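
/*
 * Illustrative sketch only (not compiled): a scan callback in the style
 * consumed by allfiles_scan_exclusive().  The callback name and the
 * convention that a zero return continues the scan are assumptions for
 * illustration, not guarantees of this file's API.
 */
#if 0
static int
count_vnode_files_callback(struct file *fp, void *data)
{
        int *countp = data;

        if (fp->f_type == DTYPE_VNODE)
                ++*countp;
        return (0);             /* keep scanning */
}

/* usage: allfiles_scan_exclusive(count_vnode_files_callback, &count); */
#endif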
/*
 * Get file structures.
 *
 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe)
 */
struct sysctl_kern_file_info {
        int count;
        int error;
        struct sysctl_req *req;
};

static int sysctl_kern_file_callback(struct proc *p, void *data);

sysctl_kern_file(SYSCTL_HANDLER_ARGS)
{
        struct sysctl_kern_file_info info;

        /*
         * Note: because the number of file descriptors is calculated
         * in different ways for sizing vs returning the data,
         * there is information leakage from the first loop.  However,
         * it is of a similar order of magnitude to the leakage from
         * global system statistics such as kern.openfiles.
         *
         * When just doing a count, note that we cannot just count
         * the elements and add f_count via the filehead list because
         * threaded processes share their descriptor table and f_count might
         * still be '1' in that case.
         *
         * Since the SYSCTL op can block, we must hold the process to
         * prevent it being ripped out from under us either in the
         * file descriptor loop or in the greater LIST_FOREACH.  The
         * process may be in varying states of disrepair.  If the process
         * is in SZOMB we may have caught it just as it is being removed
         * from the allproc list, we must skip it in that case to maintain
         * an unbroken chain through the allproc list.
         */
        allproc_scan(sysctl_kern_file_callback, &info, 0);

        /*
         * When just calculating the size, overestimate a bit to try to
         * prevent system activity from causing the buffer-fill call
         * to fail later on.
         */
        if (req->oldptr == NULL) {
                info.count = (info.count + 16) + (info.count / 10);
                info.error = SYSCTL_OUT(req, NULL,
                                        info.count * sizeof(struct kinfo_file));
        }
        return (info.error);
}
sysctl_kern_file_callback(struct proc *p, void *data)
{
        struct sysctl_kern_file_info *info = data;
        struct kinfo_file kf;
        struct filedesc *fdp;

        if (p->p_stat == SIDL || p->p_stat == SZOMB)
                return (0);
        if (!(PRISON_CHECK(info->req->td->td_ucred, p->p_ucred) != 0))
                return (0);

        /*
         * Softref the fdp to prevent it from being destroyed
         */
        spin_lock(&p->p_spin);
        if ((fdp = p->p_fd) == NULL) {
                spin_unlock(&p->p_spin);
                return (0);
        }
        atomic_add_int(&fdp->fd_softrefs, 1);
        spin_unlock(&p->p_spin);

        /*
         * The fdp's own spinlock prevents the contents from being
         * modified.
         */
        spin_lock_shared(&fdp->fd_spin);
        for (n = 0; n < fdp->fd_nfiles; ++n) {
                if ((fp = fdp->fd_files[n].fp) == NULL)
                        continue;
                if (info->req->oldptr == NULL) {
                        info->count++;
                } else {
                        uid = p->p_ucred ? p->p_ucred->cr_uid : -1;
                        kcore_make_file(&kf, fp, p->p_pid, uid, n);
                        spin_unlock_shared(&fdp->fd_spin);
                        info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf));
                        spin_lock_shared(&fdp->fd_spin);
                }
        }
        spin_unlock_shared(&fdp->fd_spin);
        atomic_subtract_int(&fdp->fd_softrefs, 1);
SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
            0, 0, sysctl_kern_file, "S,file", "Entire file table");

SYSCTL_INT(_kern, OID_AUTO, minfilesperproc, CTLFLAG_RW,
           &minfilesperproc, 0, "Minimum files allowed open per process");
SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
           &maxfilesperproc, 0, "Maximum files allowed open per process");
SYSCTL_INT(_kern, OID_AUTO, maxfilesperuser, CTLFLAG_RW,
           &maxfilesperuser, 0, "Maximum files allowed open per user");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
           &maxfiles, 0, "Maximum number of files");

SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
           &maxfilesrootres, 0, "Descriptors reserved for root use");

SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
           &nfiles, 0, "System-wide number of open files");
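
/*
 * Userland illustration only (not compiled): reading the kern.file table
 * exported by the SYSCTL_PROC above, first sizing the buffer (the handler
 * deliberately over-estimates) and then fetching the kinfo_file entries.
 * The header location of struct kinfo_file is assumed from the DragonFly
 * headers; the helper name is hypothetical.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/kinfo.h>
#include <stdio.h>
#include <stdlib.h>

static void
dump_open_file_count(void)
{
        size_t len = 0;
        void *buf;

        if (sysctlbyname("kern.file", NULL, &len, NULL, 0) < 0)
                return;
        if ((buf = malloc(len)) == NULL)
                return;
        if (sysctlbyname("kern.file", buf, &len, NULL, 0) == 0)
                printf("%zu file entries\n", len / sizeof(struct kinfo_file));
        free(buf);
}
#endif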
fildesc_drvinit(void *unused)
{
        for (fd = 0; fd < NUMFDESC; fd++) {
                make_dev(&fildesc_ops, fd,
                         UID_BIN, GID_BIN, 0666, "fd/%d", fd);
        }

        make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
        make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
        make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
}
struct fileops badfileops = {
        .fo_read = badfo_readwrite,
        .fo_write = badfo_readwrite,
        .fo_ioctl = badfo_ioctl,
        .fo_kqfilter = badfo_kqfilter,
        .fo_stat = badfo_stat,
        .fo_close = badfo_close,
        .fo_shutdown = badfo_shutdown
};
badfo_ioctl(struct file *fp, u_long com, caddr_t data,
            struct ucred *cred, struct sysmsg *msgv)

/*
 * Must return an error to prevent registration, typically
 * due to a revoked descriptor (file_filtops assigned).
 */
badfo_kqfilter(struct file *fp, struct knote *kn)
{
        return (EOPNOTSUPP);
}

badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred)

badfo_close(struct file *fp)

badfo_shutdown(struct file *fp, int how)

nofo_shutdown(struct file *fp, int how)
{
        return (EOPNOTSUPP);
}
SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR,
        fildesc_drvinit, NULL);
filelist_heads_init(void *arg __unused)
{
        for (i = 0; i < NFILELIST_HEADS; ++i) {
                struct filelist_head *head = &filelist_heads[i];

                spin_init(&head->spin, "filehead_spin");
                LIST_INIT(&head->list);
        }
}

SYSINIT(filelistheads, SI_BOOT1_LOCK, SI_ORDER_ANY,
        filelist_heads_init, NULL);
file_objcache_init(void *dummy __unused)
{
        file_objcache = objcache_create("file", maxfiles, maxfiles / 8,
                                        NULL, NULL, NULL, /* TODO: ctor/dtor */
                                        objcache_malloc_alloc,
                                        objcache_malloc_free,
                                        &file_malloc_args);
}

SYSINIT(fpobjcache, SI_BOOT2_POST_SMP, SI_ORDER_ANY, file_objcache_init, NULL);