/*
 * Copyright (c) 2005-2018 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey Hsu and Matthew Dillon.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/device.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/nlookup.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/event.h>
#include <sys/kern_syscall.h>
#include <sys/kcore.h>
#include <sys/kinfo.h>
#include <sys/objcache.h>
#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/spinlock2.h>
static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd);
static void fdreserve_locked(struct filedesc *fdp, int fd0, int incr);
static struct file *funsetfd_locked(struct filedesc *fdp, int fd);
static void ffree(struct file *fp);

static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
		     "file desc to leader structures");
MALLOC_DEFINE(M_FILE, "file", "Open file structure");
static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");

static struct krate krate_uidinfo = { .freq = 1 };

static d_open_t fdopen;

#define CDEV_MAJOR 22
static struct dev_ops fildesc_ops = {
	{ "FD", 0, 0 },
	.d_open =	fdopen,
};
/*
 * Descriptor management.
 */
#ifndef NFILELIST_HEADS
#define NFILELIST_HEADS		257	/* prime number */
#endif

struct filelist_head {
	struct spinlock		spin;
	struct filelist		list;
} __cachealign;

static struct filelist_head	filelist_heads[NFILELIST_HEADS];

static int nfiles;		/* actual number of open files */

struct lwkt_token revoke_token = LWKT_TOKEN_INITIALIZER(revoke_token);

static struct objcache		*file_objcache;

static struct objcache_malloc_args file_malloc_args = {
	.objsize	= sizeof(struct file),
	.mtype		= M_FILE
};
/*
 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared.
 *
 * must be called with fdp->fd_spin exclusively held
 */
static __inline
void
fdfixup_locked(struct filedesc *fdp, int fd)
{
	if (fd < fdp->fd_freefile) {
		fdp->fd_freefile = fd;
	}
	while (fdp->fd_lastfile >= 0 &&
	       fdp->fd_files[fdp->fd_lastfile].fp == NULL &&
	       fdp->fd_files[fdp->fd_lastfile].reserved == 0) {
		--fdp->fd_lastfile;
	}
}
/*
 * Clear the fd thread caches for this fdnode.
 *
 * If match_fdc is NULL, all thread caches of fdn will be cleared.
 * The caller must hold fdp->fd_spin exclusively.  The threads caching
 * the descriptor do not have to be the current thread.  The (status)
 * argument is ignored.
 *
 * If match_fdc is not NULL, only the match_fdc's cache will be cleared.
 * The caller must hold fdp->fd_spin shared and match_fdc must match a
 * fdcache entry in curthread.  match_fdc has been locked by the caller
 * and had the specified (status).
 *
 * Since we are matching against a fp in the fdp (which must still be present
 * at this time), fp will have at least two refs on any match and we can
 * decrement the count trivially.
 */
static
void
fclearcache(struct fdnode *fdn, struct fdcache *match_fdc, int status)
{
	struct fdcache *fdc;
	struct file *fp;
	int i;

	/*
	 * match_fdc == NULL	We are cleaning out all tdcache entries
	 *			for the fdn and hold fdp->fd_spin exclusively.
	 *			This can race against the target threads
	 *			cleaning out specific entries.
	 *
	 * match_fdc != NULL	We are cleaning out a specific tdcache
	 *			entry on behalf of the owning thread
	 *			and hold fdp->fd_spin shared.  The thread
	 *			has already locked the entry.  This cannot
	 *			race.
	 */
	fp = fdn->fp;
	for (i = 0; i < NTDCACHEFD; ++i) {
		if ((fdc = fdn->tdcache[i]) == NULL)
			continue;

		/*
		 * If match_fdc is non-NULL we are being asked to
		 * clear a specific fdc owned by curthread.  There must
		 * be exactly one match.  The caller has already locked
		 * the cache entry and will dispose of the lock after
		 * we return.
		 *
		 * Since we also have a shared lock on fdp, we
		 * can do this without atomic ops.
		 */
		if (match_fdc) {
			if (fdc != match_fdc)
				continue;
			fdn->tdcache[i] = NULL;
			KASSERT(fp == fdc->fp,
				("fclearcache(1): fp mismatch %p/%p\n",
				 fp, fdc->fp));
			fdc->fd = -1;
			fdc->fp = NULL;

			/*
			 * status can be 0 or 2.  If 2 the ref is borrowed,
			 * if 0 the ref is not borrowed and we have to drop
			 * it.
			 */
			if (status == 0)
				atomic_add_int(&fp->f_count, -1);
			fdn->isfull = 0;	/* heuristic */
			return;
		}

		/*
		 * Otherwise we hold an exclusive spin-lock and can only
		 * race thread consumers borrowing cache entries.
		 *
		 * Acquire the lock and dispose of the entry.  We have to
		 * spin until we get the lock.
		 */
		for (;;) {
			status = atomic_swap_int(&fdc->locked, 1);
			if (status == 1) {	/* foreign lock, retry */
				cpu_pause();
				continue;
			}
			fdn->tdcache[i] = NULL;
			KASSERT(fp == fdc->fp,
				("fclearcache(2): fp mismatch %p/%p\n",
				 fp, fdc->fp));
			fdc->fd = -1;
			fdc->fp = NULL;
			if (status == 0)
				atomic_add_int(&fp->f_count, -1);
			fdn->isfull = 0;	/* heuristic */
			atomic_swap_int(&fdc->locked, 0);
			break;
		}
	}
	KKASSERT(match_fdc == NULL);
}
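/*
 * Summary of the fdcache->locked states used above and below (derived
 * from the surrounding logic; for reference only):
 *
 *	0 - entry unlocked; the cache entry itself owns a ref on fp.
 *	1 - transient lock taken via atomic_swap_int() while an entry is
 *	    examined, cleared, or installed.
 *	2 - the entry's ref has been borrowed by the owning thread
 *	    (see _holdfp_cache() and dropfp()).
 */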
/*
 * Retrieve the fp for the specified fd given the specified file descriptor
 * table.  The fdp does not have to be owned by the current process.
 * If flags != -1, fp->f_flag must contain at least one of the flags.
 *
 * This function is not able to cache the fp.
 */
struct file *
holdfp_fdp(struct filedesc *fdp, int fd, int flag)
{
	struct file *fp;

	spin_lock_shared(&fdp->fd_spin);
	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			if ((fp->f_flag & flag) == 0 && flag != -1) {
				fp = NULL;
			} else {
				fhold(fp);
			}
		}
	} else {
		fp = NULL;
	}
	spin_unlock_shared(&fdp->fd_spin);

	return fp;
}
struct file *
holdfp_fdp_locked(struct filedesc *fdp, int fd, int flag)
{
	struct file *fp;

	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			if ((fp->f_flag & flag) == 0 && flag != -1) {
				fp = NULL;
			} else {
				fhold(fp);
			}
		}
	} else {
		fp = NULL;
	}
	return fp;
}
/*
 * Acquire the fp for the specified file descriptor, using the thread
 * cache if possible and caching it if possible.
 *
 * td must be the current thread.
 */
static
struct file *
_holdfp_cache(thread_t td, int fd)
{
	struct filedesc *fdp;
	struct fdcache *fdc;
	struct fdcache *best;
	struct fdnode *fdn;
	struct file *fp;
	int status;
	int delta;
	int i;

	/*
	 * Fast path: check the per-thread cache first.
	 */
	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
		if (fdc->fd != fd || fdc->fp == NULL)
			continue;
		status = atomic_swap_int(&fdc->locked, 1);

		/*
		 * If someone else has locked our cache entry they are in
		 * the middle of clearing it, skip the entry.
		 */
		if (status == 1)
			continue;

		/*
		 * We have locked the entry, but if it no longer matches
		 * restore the previous state (0 or 2) and skip the entry.
		 */
		if (fdc->fd != fd || fdc->fp == NULL) {
			atomic_swap_int(&fdc->locked, status);
			continue;
		}

		/*
		 * We have locked a valid entry.  We can borrow the ref
		 * for a mode 0 entry.  We can get a valid fp for a mode
		 * 2 entry but not borrow the ref.
		 */
		if (status == 0) {
			fp = fdc->fp;
			fdc->lru = ++td->td_fdcache_lru;
			atomic_swap_int(&fdc->locked, 2);

			return fp;
		}
		if (status == 2) {
			fp = fdc->fp;
			fhold(fp);
			fdc->lru = ++td->td_fdcache_lru;
			atomic_swap_int(&fdc->locked, 2);

			return fp;
		}
	}

	/*
	 * Lookup the descriptor the slow way.  This can contend against
	 * modifying operations in a multi-threaded environment and cause
	 * cache line ping ponging otherwise.
	 */
	fdp = td->td_proc->p_fd;
	spin_lock_shared(&fdp->fd_spin);

	if (((u_int)fd) < fdp->fd_nfiles) {
		fp = fdp->fd_files[fd].fp;	/* can be NULL */
		if (fp) {
			fhold(fp);
			if (fdp->fd_files[fd].isfull == 0)
				goto enter;
		}
	} else {
		fp = NULL;
	}
	spin_unlock_shared(&fdp->fd_spin);

	return fp;

	/*
	 * We found a valid fp and held it, fdp is still shared locked.
	 * Enter the fp into the per-thread cache.  Find the oldest entry
	 * via lru, or an empty entry.
	 *
	 * Because fdp's spinlock is held (shared is fine), no other
	 * thread should be in the middle of clearing our selected entry.
	 */
enter:
	best = &td->td_fdcache[0];
	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
		if (fdc->fp == NULL) {
			best = fdc;
			break;
		}
		delta = fdc->lru - best->lru;
		if (delta < 0)
			best = fdc;
	}

	/*
	 * Don't enter into the cache if we cannot get the lock.
	 */
	status = atomic_swap_int(&best->locked, 1);
	if (status == 1)
		goto done;

	/*
	 * Clear the previous cache entry if present
	 */
	if (best->fp) {
		KKASSERT(best->fd >= 0);
		fclearcache(&fdp->fd_files[best->fd], best, status);
	}

	/*
	 * Create our new cache entry.  This entry is 'safe' until we tie
	 * into the fdnode.  If we cannot tie in, we will clear the entry.
	 */
	best->fd = fd;
	best->fp = fp;
	best->lru = ++td->td_fdcache_lru;
	best->locked = 2;			/* borrowed ref */

	fdn = &fdp->fd_files[fd];
	for (i = 0; i < NTDCACHEFD; ++i) {
		if (fdn->tdcache[i] == NULL &&
		    atomic_cmpset_ptr((void **)&fdn->tdcache[i], NULL, best)) {
			goto done;
		}
	}
	fdn->isfull = 1;			/* no space */
	best->fd = -1;
	best->fp = NULL;
	best->locked = 0;
done:
	spin_unlock_shared(&fdp->fd_spin);

	return fp;
}
/*
 * Drop the file pointer and return to the thread cache if possible.
 *
 * Caller must not hold fdp's spin lock.
 * td must be the current thread.
 */
void
dropfp(thread_t td, int fd, struct file *fp)
{
	struct filedesc *fdp;
	struct fdcache *fdc;
	int status;

	fdp = td->td_proc->p_fd;

	/*
	 * If our placeholder is still present we can re-cache the ref.
	 *
	 * Note that we can race an fclearcache().
	 */
	for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
		if (fdc->fp != fp || fdc->fd != fd)
			continue;
		status = atomic_swap_int(&fdc->locked, 1);
		switch(status) {
		case 0:
			/*
			 * Not in mode 2, fdrop fp without caching.
			 */
			atomic_swap_int(&fdc->locked, 0);
			break;
		case 1:
			/*
			 * Not in mode 2, locked by someone else.
			 * fdrop fp without caching.
			 */
			break;
		case 2:
			/*
			 * Intact borrowed ref, return to mode 0
			 * indicating that we have returned the ref.
			 *
			 * Return the borrowed ref (2->1->0)
			 */
			if (fdc->fp == fp && fdc->fd == fd) {
				atomic_swap_int(&fdc->locked, 0);
				return;
			}
			atomic_swap_int(&fdc->locked, 2);
			break;
		}
	}

	/*
	 * Failed to re-cache, drop the fp without caching.
	 */
	fdrop(fp);
}
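/*
 * Illustrative usage of the holdfp()/dropfp() pair (sketch only; "fd"
 * stands for any descriptor owned by the current thread):
 *
 *	fp = holdfp(curthread, fd, FREAD);
 *	if (fp) {
 *		... operate on fp ...
 *		dropfp(curthread, fd, fp);
 *	}
 *
 * When the thread-cache placeholder is still intact, dropfp() returns
 * the borrowed ref (mode 2 -> 0) without touching fp->f_count;
 * otherwise it falls back to a normal fdrop().
 */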
/*
 * Clear all descriptors cached in the per-thread fd cache for
 * the specified thread.
 *
 * Caller must not hold p_fd->spin.  This function will temporarily
 * obtain a shared spin lock.
 */
void
fexitcache(thread_t td)
{
	struct filedesc *fdp;
	struct fdcache *fdc;
	int status;
	int i;

	if (td->td_proc == NULL)
		return;
	fdp = td->td_proc->p_fd;
	if (fdp == NULL)
		return;

	/*
	 * A shared lock is sufficient as the caller controls td and we
	 * are only clearing td's cache.
	 */
	spin_lock_shared(&fdp->fd_spin);
	for (i = 0; i < NFDCACHE; ++i) {
		fdc = &td->td_fdcache[i];
		if (fdc->fp) {
			status = atomic_swap_int(&fdc->locked, 1);
			if (status == 1) {
				cpu_pause();
				--i;
				continue;
			}
			if (fdc->fp) {
				KKASSERT(fdc->fd >= 0);
				fclearcache(&fdp->fd_files[fdc->fd], fdc,
					    status);
			}
			atomic_swap_int(&fdc->locked, 0);
		}
	}
	spin_unlock_shared(&fdp->fd_spin);
}
static __inline struct filelist_head *
fp2filelist(const struct file *fp)
{
	u_int i;

	i = (u_int)(uintptr_t)fp % NFILELIST_HEADS;
	return &filelist_heads[i];
}
static __inline
struct plimit *
readplimits(struct proc *p)
{
	thread_t td = curthread;
	struct plimit *limit;

	limit = td->td_limit;
	if (limit != p->p_limit) {
		spin_lock_shared(&p->p_spin);
		limit = p->p_limit;
		atomic_add_int(&limit->p_refcnt, 1);
		spin_unlock_shared(&p->p_spin);
		if (td->td_limit)
			plimit_free(td->td_limit);
		td->td_limit = limit;
	}
	return limit;
}
/*
 * System calls on descriptors.
 */
int
sys_getdtablesize(struct getdtablesize_args *uap)
{
	struct proc *p = curproc;
	struct plimit *limit = readplimits(p);
	int dtsize;

	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		dtsize = INT_MAX;
	else
		dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (dtsize > maxfilesperproc)
		dtsize = maxfilesperproc;
	if (dtsize < minfilesperproc)
		dtsize = minfilesperproc;
	if (p->p_ucred->cr_uid && dtsize > maxfilesperuser)
		dtsize = maxfilesperuser;
	uap->sysmsg_result = dtsize;

	return (0);
}
/*
 * Duplicate a file descriptor to a particular value.
 *
 * note: keep in mind that a potential race condition exists when closing
 * descriptors from a shared descriptor table (via rfork).
 */
int
sys_dup2(struct dup2_args *uap)
{
	int error;
	int fd = 0;

	error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd);
	uap->sysmsg_fds[0] = fd;

	return (error);
}

/*
 * Duplicate a file descriptor.
 */
int
sys_dup(struct dup_args *uap)
{
	int error;
	int fd = 0;

	error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd);
	uap->sysmsg_fds[0] = fd;

	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock for fp operations
 */
int
kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct vnode *vp;
	u_int newmin;
	u_int oflags;
	u_int nflags;
	int closedcounter;
	int tmp, error, flg = F_POSIX;

	KKASSERT(p);

	/*
	 * Operations on file descriptors that do not require a file pointer.
	 */
	switch (cmd) {
	case F_GETFD:
		error = fgetfdflags(p->p_fd, fd, &tmp);
		if (error == 0)
			dat->fc_cloexec = (tmp & UF_EXCLOSE) ? FD_CLOEXEC : 0;
		return (error);

	case F_SETFD:
		if (dat->fc_cloexec & FD_CLOEXEC)
			error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
		else
			error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
		return (error);

	case F_DUPFD:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_VARIABLE | DUP_FCNTL, fd, newmin,
				 &dat->fc_fd);
		return (error);

	case F_DUPFD_CLOEXEC:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_VARIABLE | DUP_CLOEXEC | DUP_FCNTL,
				 fd, newmin, &dat->fc_fd);
		return (error);

	case F_DUP2FD:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_FIXED, fd, newmin, &dat->fc_fd);
		return (error);

	case F_DUP2FD_CLOEXEC:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_FIXED | DUP_CLOEXEC, fd, newmin,
				 &dat->fc_fd);
		return (error);

	default:
		break;
	}

	/*
	 * Operations on file pointers
	 */
	closedcounter = p->p_fd->fd_closedcounter;
	if ((fp = holdfp(td, fd, -1)) == NULL)
		return (EBADF);

	switch (cmd) {
	case F_GETFL:
		dat->fc_flags = OFLAGS(fp->f_flag);
		error = 0;
		break;

	case F_SETFL:
		oflags = fp->f_flag;
		nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
		nflags |= oflags & ~FCNTLFLAGS;

		error = 0;
		if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY))
			error = EINVAL;
		if (error == 0 && ((nflags ^ oflags) & FASYNC)) {
			tmp = nflags & FASYNC;
			error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp,
					 cred, NULL);
		}

		/*
		 * If no error, must be atomically set.
		 */
		while (error == 0) {
			oflags = fp->f_flag;
			cpu_ccfence();
			nflags = (oflags & ~FCNTLFLAGS) | (nflags & FCNTLFLAGS);
			if (atomic_cmpset_int(&fp->f_flag, oflags, nflags))
				break;
			cpu_pause();
		}
		break;

	case F_GETOWN:
		error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner,
				 cred, NULL);
		break;

	case F_SETOWN:
		error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner,
				 cred, NULL);
		break;

	case F_SETLKW:
		flg |= F_WAIT;
		/* Fall into F_SETLK */

	case F_SETLK:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;

		/*
		 * copyin/lockop may block
		 */
		if (dat->fc_flock.l_whence == SEEK_CUR)
			dat->fc_flock.l_start += fp->f_offset;

		switch (dat->fc_flock.l_type) {
		case F_RDLCK:
			if ((fp->f_flag & FREAD) == 0) {
				error = EBADF;
				break;
			}
			if (p->p_leader->p_advlock_flag == 0)
				p->p_leader->p_advlock_flag = 1;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
					    &dat->fc_flock, flg);
			break;
		case F_WRLCK:
			if ((fp->f_flag & FWRITE) == 0) {
				error = EBADF;
				break;
			}
			if (p->p_leader->p_advlock_flag == 0)
				p->p_leader->p_advlock_flag = 1;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
					    &dat->fc_flock, flg);
			break;
		case F_UNLCK:
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
					    &dat->fc_flock, F_POSIX);
			break;
		default:
			error = EINVAL;
			break;
		}

		/*
		 * It is possible to race a close() on the descriptor while
		 * we were blocked getting the lock.  If this occurs the
		 * close might not have caught the lock.
		 */
		if (checkfdclosed(td, p->p_fd, fd, fp, closedcounter)) {
			dat->fc_flock.l_whence = SEEK_SET;
			dat->fc_flock.l_start = 0;
			dat->fc_flock.l_len = 0;
			dat->fc_flock.l_type = F_UNLCK;
			VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
				    F_UNLCK, &dat->fc_flock, F_POSIX);
		}
		break;

	case F_GETLK:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;

		/*
		 * copyin/lockop may block
		 */
		if (dat->fc_flock.l_type != F_RDLCK &&
		    dat->fc_flock.l_type != F_WRLCK &&
		    dat->fc_flock.l_type != F_UNLCK) {
			error = EINVAL;
			break;
		}
		if (dat->fc_flock.l_whence == SEEK_CUR)
			dat->fc_flock.l_start += fp->f_offset;
		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
				    &dat->fc_flock, F_POSIX);
		break;

	default:
		error = EINVAL;
		break;
	}

	fdrop(fp);
	return (error);
}
/*
 * The file control system call.
 */
int
sys_fcntl(struct fcntl_args *uap)
{
	union fcntl_dat dat;
	int error;

	switch (uap->cmd) {
	case F_DUPFD:
	case F_DUP2FD:
	case F_DUPFD_CLOEXEC:
	case F_DUP2FD_CLOEXEC:
		dat.fc_fd = uap->arg;
		break;
	case F_SETFD:
		dat.fc_cloexec = uap->arg;
		break;
	case F_SETFL:
		dat.fc_flags = uap->arg;
		break;
	case F_SETOWN:
		dat.fc_owner = uap->arg;
		break;
	case F_SETLKW:
	case F_SETLK:
	case F_GETLK:
		error = copyin((caddr_t)uap->arg, &dat.fc_flock,
			       sizeof(struct flock));
		if (error)
			return (error);
		break;
	}

	error = kern_fcntl(uap->fd, uap->cmd, &dat, curthread->td_ucred);

	if (error == 0) {
		switch (uap->cmd) {
		case F_DUPFD:
		case F_DUP2FD:
		case F_DUPFD_CLOEXEC:
		case F_DUP2FD_CLOEXEC:
			uap->sysmsg_result = dat.fc_fd;
			break;
		case F_GETFD:
			uap->sysmsg_result = dat.fc_cloexec;
			break;
		case F_GETFL:
			uap->sysmsg_result = dat.fc_flags;
			break;
		case F_GETOWN:
			uap->sysmsg_result = dat.fc_owner;
			break;
		case F_GETLK:
			error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
					sizeof(struct flock));
			break;
		}
	}
	return (error);
}
/*
 * Common code for dup, dup2, and fcntl(F_DUPFD).
 *
 * There are four type flags: DUP_FCNTL, DUP_FIXED, DUP_VARIABLE, and
 * DUP_CLOEXEC.
 *
 * DUP_FCNTL is for handling EINVAL vs. EBADF differences between
 * fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC and dup2() (per POSIX).
 * The next two flags are mutually exclusive, and the fourth is optional.
 * DUP_FIXED tells kern_dup() to destructively dup over an existing file
 * descriptor if "new" is already open.  DUP_VARIABLE tells kern_dup()
 * to find the lowest unused file descriptor that is greater than or
 * equal to "new".  DUP_CLOEXEC, which works with either of the first
 * two flags, sets the close-on-exec flag on the "new" file descriptor.
 */
int
kern_dup(int flags, int old, int new, int *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct plimit *limit = readplimits(p);
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct file *delfp;
	int oldflags;
	int holdleaders;
	int dtsize;
	int error, newfd;

	/*
	 * Verify that we have a valid descriptor to dup from and
	 * possibly to dup to. When the new descriptor is out of
	 * bounds, fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC must
	 * return EINVAL, while dup2() returns EBADF in
	 * this case.
	 *
	 * NOTE: maxfilesperuser is not applicable to dup()
	 */
retry:
	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		dtsize = INT_MAX;
	else
		dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
	if (dtsize > maxfilesperproc)
		dtsize = maxfilesperproc;
	if (dtsize < minfilesperproc)
		dtsize = minfilesperproc;

	if (new < 0 || new > dtsize)
		return (flags & DUP_FCNTL ? EINVAL : EBADF);

	spin_lock(&fdp->fd_spin);
	if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
		spin_unlock(&fdp->fd_spin);
		return (EBADF);
	}
	if ((flags & DUP_FIXED) && old == new) {
		*res = new;
		if (flags & DUP_CLOEXEC)
			fdp->fd_files[new].fileflags |= UF_EXCLOSE;
		spin_unlock(&fdp->fd_spin);
		return (0);
	}
	fp = fdp->fd_files[old].fp;
	oldflags = fdp->fd_files[old].fileflags;
	fhold(fp);

	/*
	 * Allocate a new descriptor if DUP_VARIABLE, or expand the table
	 * if the requested descriptor is beyond the current table size.
	 *
	 * This can block.  Retry if the source descriptor no longer matches
	 * or if our expectation in the expansion case races.
	 *
	 * If we are not expanding or allocating a new descriptor, then reset
	 * the target descriptor to a reserved state so we have a uniform
	 * setup for the next code block.
	 */
	if ((flags & DUP_VARIABLE) || new >= fdp->fd_nfiles) {
		spin_unlock(&fdp->fd_spin);
		error = fdalloc(p, new, &newfd);
		spin_lock(&fdp->fd_spin);
		if (error) {
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			return (error);
		}
		/*
		 * Check for ripout, the source descriptor must still match.
		 */
		if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		/*
		 * Check for expansion race
		 */
		if ((flags & DUP_VARIABLE) == 0 && new != newfd) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		/*
		 * Check for ripout, newfd reused old (this case probably
		 * can't occur).
		 */
		if (old == newfd) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		new = newfd;
		delfp = NULL;
	} else {
		if (fdp->fd_files[new].reserved) {
			spin_unlock(&fdp->fd_spin);
			fdrop(fp);
			kprintf("Warning: dup(): target descriptor %d is "
				"reserved, waiting for it to be resolved\n",
				new);
			tsleep(fdp, 0, "fdres", hz);
			goto retry;
		}

		/*
		 * If the target descriptor was never allocated we have
		 * to allocate it.  If it was we have to clean out the
		 * old descriptor.  delfp inherits the ref from the
		 * descriptor table.
		 */
		++fdp->fd_closedcounter;
		fclearcache(&fdp->fd_files[new], NULL, 0);
		++fdp->fd_closedcounter;
		delfp = fdp->fd_files[new].fp;
		fdp->fd_files[new].fp = NULL;
		fdp->fd_files[new].reserved = 1;
		if (delfp == NULL) {
			fdreserve_locked(fdp, new, 1);
			if (new > fdp->fd_lastfile)
				fdp->fd_lastfile = new;
		}
	}

	/*
	 * NOTE: still holding an exclusive spinlock
	 */

	/*
	 * If a descriptor is being overwritten we may have to tell
	 * fdfree() to sleep to ensure that all relevant process
	 * leaders can be traversed in closef().
	 */
	if (delfp != NULL && p->p_fdtol != NULL) {
		fdp->fd_holdleaderscount++;
		holdleaders = 1;
	} else {
		holdleaders = 0;
	}
	KASSERT(delfp == NULL || (flags & DUP_FIXED),
		("dup() picked an open file"));

	/*
	 * Duplicate the source descriptor, update lastfile.  If the new
	 * descriptor was not allocated and we aren't replacing an existing
	 * descriptor we have to mark the descriptor as being in use.
	 *
	 * The fd_files[] array inherits fp's hold reference.
	 */
	fsetfd_locked(fdp, fp, new);
	if ((flags & DUP_CLOEXEC) != 0)
		fdp->fd_files[new].fileflags = oldflags | UF_EXCLOSE;
	else
		fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
	spin_unlock(&fdp->fd_spin);
	*res = new;

	/*
	 * If we dup'd over a valid file, we now own the reference to it
	 * and must dispose of it using closef() semantics (as if a
	 * close() were performed on it).
	 */
	if (delfp) {
		if (SLIST_FIRST(&delfp->f_klist))
			knote_fdclose(delfp, fdp, new);
		closef(delfp, p);
		if (holdleaders) {
			spin_lock(&fdp->fd_spin);
			fdp->fd_holdleaderscount--;
			if (fdp->fd_holdleaderscount == 0 &&
			    fdp->fd_holdleaderswakeup != 0) {
				fdp->fd_holdleaderswakeup = 0;
				spin_unlock(&fdp->fd_spin);
				wakeup(&fdp->fd_holdleaderscount);
			} else {
				spin_unlock(&fdp->fd_spin);
			}
		}
	}
	return (0);
}
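/*
 * Flag combinations used by the callers in this file, for reference:
 *
 *	dup(fd)				DUP_VARIABLE
 *	dup2(from, to)			DUP_FIXED
 *	fcntl(F_DUPFD)			DUP_VARIABLE | DUP_FCNTL
 *	fcntl(F_DUPFD_CLOEXEC)		DUP_VARIABLE | DUP_CLOEXEC | DUP_FCNTL
 *	fcntl(F_DUP2FD)			DUP_FIXED
 *	fcntl(F_DUP2FD_CLOEXEC)		DUP_FIXED | DUP_CLOEXEC
 */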
/*
 * If sigio is on the list associated with a process or process group,
 * disable signalling from the device, remove sigio from the list and
 * free sigio.
 */
void
funsetown(struct sigio **sigiop)
{
	struct pgrp *pgrp;
	struct proc *p;
	struct sigio *sigio;

	if ((sigio = *sigiop) != NULL) {
		lwkt_gettoken(&sigio_token);	/* protect sigio */
		KKASSERT(sigiop == sigio->sio_myref);
		sigio = *sigiop;
		*sigiop = NULL;
		lwkt_reltoken(&sigio_token);
	}
	if (sigio == NULL)
		return;

	if (sigio->sio_pgid < 0) {
		pgrp = sigio->sio_pgrp;
		sigio->sio_pgrp = NULL;
		lwkt_gettoken(&pgrp->pg_token);
		SLIST_REMOVE(&pgrp->pg_sigiolst, sigio, sigio, sio_pgsigio);
		lwkt_reltoken(&pgrp->pg_token);
		pgrel(pgrp);
	} else /* if ((*sigiop)->sio_pgid > 0) */ {
		p = sigio->sio_proc;
		sigio->sio_proc = NULL;
		PHOLD(p);
		lwkt_gettoken(&p->p_token);
		SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio);
		lwkt_reltoken(&p->p_token);
		PRELE(p);
	}
	crfree(sigio->sio_ucred);
	sigio->sio_ucred = NULL;
	kfree(sigio, M_SIGIO);
}
/*
 * Free a list of sigio structures.  Caller is responsible for ensuring
 * that the list is MPSAFE.
 */
void
funsetownlst(struct sigiolst *sigiolst)
{
	struct sigio *sigio;

	while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
		funsetown(sigio->sio_myref);
}
/*
 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
 *
 * After permission checking, add a sigio structure to the sigio list for
 * the process or process group.
 */
int
fsetown(pid_t pgid, struct sigio **sigiop)
{
	struct proc *proc = NULL;
	struct pgrp *pgrp = NULL;
	struct sigio *sigio;
	int error;

	if (pgid == 0) {
		funsetown(sigiop);
		return (0);
	}

	if (pgid > 0) {
		proc = pfind(pgid);
		if (proc == NULL) {
			error = ESRCH;
			goto done;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (proc->p_session != curproc->p_session) {
			error = EPERM;
			goto done;
		}
	} else /* if (pgid < 0) */ {
		pgrp = pgfind(-pgid);
		if (pgrp == NULL) {
			error = ESRCH;
			goto done;
		}

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pgrp->pg_session != curproc->p_session) {
			error = EPERM;
			goto done;
		}
	}

	sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK | M_ZERO);
	if (pgid > 0) {
		KKASSERT(pgrp == NULL);
		lwkt_gettoken(&proc->p_token);
		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
		sigio->sio_proc = proc;
		lwkt_reltoken(&proc->p_token);
	} else {
		KKASSERT(proc == NULL);
		lwkt_gettoken(&pgrp->pg_token);
		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
		sigio->sio_pgrp = pgrp;
		lwkt_reltoken(&pgrp->pg_token);
	}
	sigio->sio_pgid = pgid;
	sigio->sio_ucred = crhold(curthread->td_ucred);
	/* It would be convenient if p_ruid was in ucred. */
	sigio->sio_ruid = sigio->sio_ucred->cr_ruid;
	sigio->sio_myref = sigiop;

	lwkt_gettoken(&sigio_token);
	while (*sigiop)
		funsetown(sigiop);
	*sigiop = sigio;
	lwkt_reltoken(&sigio_token);
	error = 0;
done:
	if (pgrp)
		pgrel(pgrp);
	if (proc)
		PRELE(proc);
	return (error);
}
/*
 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
 */
pid_t
fgetown(struct sigio **sigiop)
{
	struct sigio *sigio;
	pid_t own;

	lwkt_gettoken_shared(&sigio_token);
	sigio = *sigiop;
	own = (sigio != NULL ? sigio->sio_pgid : 0);
	lwkt_reltoken(&sigio_token);

	return (own);
}
/*
 * Close many file descriptors.
 */
int
sys_closefrom(struct closefrom_args *uap)
{
	return(kern_closefrom(uap->fd));
}

/*
 * Close all file descriptors greater than or equal to fd
 */
int
kern_closefrom(int fd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;

	KKASSERT(p);
	fdp = p->p_fd;

	if (fd < 0)
		return (EINVAL);

	/*
	 * NOTE: This function will skip unassociated descriptors and
	 *	 reserved descriptors that have not yet been assigned.
	 *	 fd_lastfile can change as a side effect of kern_close().
	 */
	spin_lock(&fdp->fd_spin);
	while (fd <= fdp->fd_lastfile) {
		if (fdp->fd_files[fd].fp != NULL) {
			spin_unlock(&fdp->fd_spin);
			/* ok if this races another close */
			if (kern_close(fd) == EINTR)
				return (EINTR);
			spin_lock(&fdp->fd_spin);
		}
		++fd;
	}
	spin_unlock(&fdp->fd_spin);

	return (0);
}
/*
 * Close a file descriptor.
 */
int
sys_close(struct close_args *uap)
{
	return(kern_close(uap->fd));
}

int
kern_close(int fd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	struct file *fp;
	int error;
	int holdleaders;

	KKASSERT(p);
	fdp = p->p_fd;

	/*
	 * funsetfd*() also clears the fd cache
	 */
	spin_lock(&fdp->fd_spin);
	if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
		spin_unlock(&fdp->fd_spin);
		return (EBADF);
	}
	holdleaders = 0;
	if (p->p_fdtol != NULL) {
		/*
		 * Ask fdfree() to sleep to ensure that all relevant
		 * process leaders can be traversed in closef().
		 */
		fdp->fd_holdleaderscount++;
		holdleaders = 1;
	}

	/*
	 * we now hold the fp reference that used to be owned by the descriptor
	 * array.
	 */
	spin_unlock(&fdp->fd_spin);
	if (SLIST_FIRST(&fp->f_klist))
		knote_fdclose(fp, fdp, fd);
	error = closef(fp, p);
	if (holdleaders) {
		spin_lock(&fdp->fd_spin);
		fdp->fd_holdleaderscount--;
		if (fdp->fd_holdleaderscount == 0 &&
		    fdp->fd_holdleaderswakeup != 0) {
			fdp->fd_holdleaderswakeup = 0;
			spin_unlock(&fdp->fd_spin);
			wakeup(&fdp->fd_holdleaderscount);
		} else {
			spin_unlock(&fdp->fd_spin);
		}
	}
	return (error);
}
/*
 * shutdown_args(int fd, int how)
 */
int
kern_shutdown(int fd, int how)
{
	struct thread *td = curthread;
	struct file *fp;
	int error;

	if ((fp = holdfp(td, fd, -1)) == NULL)
		return (EBADF);
	error = fo_shutdown(fp, how);
	fdrop(fp);

	return (error);
}

int
sys_shutdown(struct shutdown_args *uap)
{
	int error;

	error = kern_shutdown(uap->s, uap->how);

	return (error);
}
int
kern_fstat(int fd, struct stat *ub)
{
	struct thread *td = curthread;
	struct file *fp;
	int error;

	if ((fp = holdfp(td, fd, -1)) == NULL)
		return (EBADF);
	error = fo_stat(fp, ub, td->td_ucred);
	fdrop(fp);

	return (error);
}

/*
 * Return status information about a file descriptor.
 */
int
sys_fstat(struct fstat_args *uap)
{
	struct stat st;
	int error;

	error = kern_fstat(uap->fd, &st);

	if (error == 0)
		error = copyout(&st, uap->sb, sizeof(st));
	return (error);
}
/*
 * Return pathconf information about a file descriptor.
 */
int
sys_fpathconf(struct fpathconf_args *uap)
{
	struct thread *td = curthread;
	struct file *fp;
	struct vnode *vp;
	int error = 0;

	if ((fp = holdfp(td, uap->fd, -1)) == NULL)
		return (EBADF);

	switch (fp->f_type) {
	case DTYPE_PIPE:
	case DTYPE_SOCKET:
		if (uap->name != _PC_PIPE_BUF) {
			error = EINVAL;
		} else {
			uap->sysmsg_result = PIPE_BUF;
			error = 0;
		}
		break;
	case DTYPE_FIFO:
	case DTYPE_VNODE:
		vp = (struct vnode *)fp->f_data;
		error = VOP_PATHCONF(vp, uap->name, &uap->sysmsg_reg);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	fdrop(fp);
	return (error);
}
/*
 * Grow the file table so it can hold through descriptor (want).
 *
 * The fdp's spinlock must be held exclusively on entry and may be held
 * exclusively on return.  The spinlock may be cycled by the routine.
 */
static void
fdgrow_locked(struct filedesc *fdp, int want)
{
	struct fdnode *newfiles;
	struct fdnode *oldfiles;
	int nf, extra;

	nf = fdp->fd_nfiles;
	do {
		/* nf has to be of the form 2^n - 1 */
		nf = 2 * nf + 1;
	} while (nf <= want);

	spin_unlock(&fdp->fd_spin);
	newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
	spin_lock(&fdp->fd_spin);

	/*
	 * We could have raced another extend while we were not holding
	 * the spinlock.
	 */
	if (fdp->fd_nfiles >= nf) {
		spin_unlock(&fdp->fd_spin);
		kfree(newfiles, M_FILEDESC);
		spin_lock(&fdp->fd_spin);
		return;
	}

	/*
	 * Copy the existing ofile and ofileflags arrays
	 * and zero the new portion of each array.
	 */
	extra = nf - fdp->fd_nfiles;
	bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
	bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));

	oldfiles = fdp->fd_files;
	fdp->fd_files = newfiles;
	fdp->fd_nfiles = nf;

	if (oldfiles != fdp->fd_builtin_files) {
		spin_unlock(&fdp->fd_spin);
		kfree(oldfiles, M_FILEDESC);
		spin_lock(&fdp->fd_spin);
	}
}
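/*
 * The 2*nf+1 recurrence above preserves the 2^n - 1 form, e.g.
 * 15 -> 31 -> 63 -> 127, stopping at the first size that exceeds
 * (want).  A complete in-place binary tree requires exactly 2^n - 1
 * slots, which is what the ancestor/subtree arithmetic below assumes.
 */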
/*
 * Number of nodes in right subtree, including the root.
 */
static __inline int
right_subtree_size(int n)
{
	return (n ^ (n | (n + 1)));
}

/*
 * Bigger ancestor.
 */
static __inline int
right_ancestor(int n)
{
	return (n | (n + 1));
}

/*
 * Smaller ancestor.
 */
static __inline int
left_ancestor(int n)
{
	return ((n & (n + 1)) - 1);
}
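/*
 * Worked example of the bit arithmetic above (n = 5, binary 101):
 *
 *	right_subtree_size(5) = 5 ^ (5 | 6) = 5 ^ 7 = 2	(nodes {5,6})
 *	right_ancestor(5)     = 5 | 6       = 7
 *	left_ancestor(5)      = (5 & 6) - 1 = 3
 */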
/*
 * Traverse the in-place binary tree bottom-up adjusting the allocation
 * count so scans can determine where free descriptors are located.
 *
 * caller must be holding an exclusive spinlock on fdp
 */
static void
fdreserve_locked(struct filedesc *fdp, int fd, int incr)
{
	while (fd >= 0) {
		fdp->fd_files[fd].allocated += incr;
		KKASSERT(fdp->fd_files[fd].allocated >= 0);
		fd = left_ancestor(fd);
	}
}
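/*
 * Example (illustrative): reserving descriptor 5 bumps .allocated at
 * node 5 and then at left_ancestor(5) = 3; left_ancestor(3) = -1 ends
 * the walk.  Reserving descriptor 6 bumps nodes 6, 5 and 3.  Each
 * node's count therefore covers itself plus its right subtree, which
 * is exactly the invariant the scan in fdalloc() consumes.
 */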
/*
 * Reserve a file descriptor for the process.  If no error occurs, the
 * caller MUST at some point call fsetfd() or assign a file pointer
 * or dispose of the reservation.
 */
int
fdalloc(struct proc *p, int want, int *result)
{
	struct plimit *limit = readplimits(p);
	struct filedesc *fdp = p->p_fd;
	struct uidinfo *uip;
	int fd, rsize, rsum, node, lim;

	/*
	 * Check dtable size limit
	 */
	*result = -1;	/* avoid gcc warnings */
	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		lim = INT_MAX;
	else
		lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (lim > maxfilesperproc)
		lim = maxfilesperproc;
	if (lim < minfilesperproc)
		lim = minfilesperproc;
	if (want >= lim)
		return (EINVAL);

	/*
	 * Check that the user has not run out of descriptors (non-root only).
	 * As a safety measure the dtable is allowed to have at least
	 * minfilesperproc open fds regardless of the maxfilesperuser limit.
	 *
	 * This isn't as loose a spec as ui_posixlocks, so we use atomic
	 * ops to force synchronize and recheck if we would otherwise
	 * exceed the limit.
	 */
	if (p->p_ucred->cr_uid && fdp->fd_nfiles >= minfilesperproc) {
		uip = p->p_ucred->cr_uidinfo;
		if (uip->ui_openfiles > maxfilesperuser) {
			int n;
			int count;

			for (n = 0; n < ncpus; ++n) {
				count = atomic_swap_int(
					    &uip->ui_pcpu[n].pu_openfiles, 0);
				atomic_add_int(&uip->ui_openfiles, count);
			}
			if (uip->ui_openfiles > maxfilesperuser) {
				krateprintf(&krate_uidinfo,
					    "Warning: user %d pid %d (%s) "
					    "ran out of file descriptors "
					    "(%d/%d)\n",
					    p->p_ucred->cr_uid, (int)p->p_pid,
					    p->p_comm,
					    uip->ui_openfiles,
					    maxfilesperuser);
				return (ENFILE);
			}
		}
	}

	/*
	 * Grow the dtable if necessary
	 */
	spin_lock(&fdp->fd_spin);
	if (want >= fdp->fd_nfiles)
		fdgrow_locked(fdp, want);

	/*
	 * Search for a free descriptor starting at the higher
	 * of want or fd_freefile.  If that fails, consider
	 * expanding the ofile array.
	 *
	 * NOTE! the 'allocated' field is a cumulative recursive allocation
	 * count.  If we happen to see a value of 0 then we can shortcut
	 * our search.  Otherwise we run through the tree going
	 * down branches we know have free descriptor(s) until we hit a
	 * leaf node.  The leaf node will be free but will not necessarily
	 * have an allocated field of 0.
	 */
retry:
	/* move up the tree looking for a subtree with a free node */
	for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
	     fd = right_ancestor(fd)) {
		if (fdp->fd_files[fd].allocated == 0)
			goto found;

		rsize = right_subtree_size(fd);
		if (fdp->fd_files[fd].allocated == rsize)
			continue;	/* right subtree full */

		/*
		 * Free fd is in the right subtree of the tree rooted at fd.
		 * Call that subtree R.  Look for the smallest (leftmost)
		 * subtree of R with an unallocated fd: continue moving
		 * down the left branch until encountering a full left
		 * subtree, then move to the right.
		 */
		for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
			node = fd + rsize;
			rsum += fdp->fd_files[node].allocated;
			if (fdp->fd_files[fd].allocated == rsum + rsize) {
				fd = node;	/* move to the right */
				if (fdp->fd_files[node].allocated == 0)
					goto found;
				rsum = 0;
			}
		}
		goto found;
	}

	/*
	 * No space in current array.  Expand?
	 */
	if (fdp->fd_nfiles >= lim) {
		spin_unlock(&fdp->fd_spin);
		return (EMFILE);
	}
	fdgrow_locked(fdp, want);
	goto retry;

found:
	KKASSERT(fd < fdp->fd_nfiles);
	if (fd > fdp->fd_lastfile)
		fdp->fd_lastfile = fd;
	if (want <= fdp->fd_freefile)
		fdp->fd_freefile = fd;
	*result = fd;
	KKASSERT(fdp->fd_files[fd].fp == NULL);
	KKASSERT(fdp->fd_files[fd].reserved == 0);
	fdp->fd_files[fd].fileflags = 0;
	fdp->fd_files[fd].reserved = 1;
	fdreserve_locked(fdp, fd, 1);
	spin_unlock(&fdp->fd_spin);
	return (0);
}
/*
 * Check to see whether n user file descriptors
 * are available to the process p.
 */
int
fdavail(struct proc *p, int n)
{
	struct plimit *limit = readplimits(p);
	struct filedesc *fdp = p->p_fd;
	struct fdnode *fdnode;
	int i, lim, last;

	if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
		lim = INT_MAX;
	else
		lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;

	if (lim > maxfilesperproc)
		lim = maxfilesperproc;
	if (lim < minfilesperproc)
		lim = minfilesperproc;

	spin_lock(&fdp->fd_spin);
	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
		spin_unlock(&fdp->fd_spin);
		return (1);
	}
	last = min(fdp->fd_nfiles, lim);
	fdnode = &fdp->fd_files[fdp->fd_freefile];
	for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) {
		if (fdnode->fp == NULL && --n <= 0) {
			spin_unlock(&fdp->fd_spin);
			return (1);
		}
	}
	spin_unlock(&fdp->fd_spin);
	return (0);
}
/*
 * Revoke open descriptors referencing (f_data, f_type)
 *
 * Any revoke executed within a prison is only able to
 * revoke descriptors for processes within that prison.
 *
 * Returns 0 on success or an error code.
 */
struct fdrevoke_info {
	void *data;
	short type;
	short unused;
	int found;
	struct ucred *cred;
	struct file *nfp;
};

static int fdrevoke_check_callback(struct file *fp, void *vinfo);
static int fdrevoke_proc_callback(struct proc *p, void *vinfo);

int
fdrevoke(void *f_data, short f_type, struct ucred *cred)
{
	struct fdrevoke_info info;
	int error;

	bzero(&info, sizeof(info));
	info.data = f_data;
	info.type = f_type;
	info.cred = cred;
	error = falloc(NULL, &info.nfp, NULL);
	if (error)
		return (error);

	/*
	 * Scan the file pointer table once.  dups do not dup file pointers,
	 * only descriptors, so there is no leak.  Set FREVOKED on the fps
	 * found.
	 *
	 * Any fps sent over unix-domain sockets will be revoked by the
	 * socket code checking for FREVOKED when the fps are externalized.
	 * revoke_token is used to make sure that fps marked FREVOKED and
	 * externalized will be picked up by the following allproc_scan().
	 */
	lwkt_gettoken(&revoke_token);
	allfiles_scan_exclusive(fdrevoke_check_callback, &info);
	lwkt_reltoken(&revoke_token);

	/*
	 * If any fps were marked track down the related descriptors
	 * and close them.  Any dup()s at this point will notice
	 * the FREVOKED already set in the fp and do the right thing.
	 */
	if (info.found)
		allproc_scan(fdrevoke_proc_callback, &info, 0);
	fdrop(info.nfp);
	return (0);
}
/*
 * Locate matching file pointers directly.
 *
 * WARNING: allfiles_scan_exclusive() holds a spinlock through these calls!
 */
static int
fdrevoke_check_callback(struct file *fp, void *vinfo)
{
	struct fdrevoke_info *info = vinfo;

	/*
	 * File pointers already flagged for revocation are skipped.
	 */
	if (fp->f_flag & FREVOKED)
		return(0);

	/*
	 * If revoking from a prison file pointers created outside of
	 * that prison, or file pointers without creds, cannot be revoked.
	 */
	if (info->cred->cr_prison &&
	    (fp->f_cred == NULL ||
	     info->cred->cr_prison != fp->f_cred->cr_prison)) {
		return(0);
	}

	/*
	 * If the file pointer matches then mark it for revocation.  The
	 * flag is currently only used by unp_revoke_gc().
	 *
	 * info->found is a heuristic and can race in a SMP environment.
	 */
	if (info->data == fp->f_data && info->type == fp->f_type) {
		atomic_set_int(&fp->f_flag, FREVOKED);
		info->found = 1;
	}
	return(0);
}
/*
 * Locate matching file pointers via process descriptor tables.
 */
static int
fdrevoke_proc_callback(struct proc *p, void *vinfo)
{
	struct fdrevoke_info *info = vinfo;
	struct filedesc *fdp;
	struct file *fp;
	int n;

	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return(0);
	if (info->cred->cr_prison &&
	    info->cred->cr_prison != p->p_ucred->cr_prison) {
		return(0);
	}

	/*
	 * If the controlling terminal of the process matches the
	 * vnode being revoked we clear the controlling terminal.
	 *
	 * The normal spec_close() may not catch this because it
	 * uses curproc instead of p.
	 */
	if (p->p_session && info->type == DTYPE_VNODE &&
	    info->data == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
		vrele(info->data);
	}

	/*
	 * Softref the fdp to prevent it from being destroyed
	 */
	spin_lock(&p->p_spin);
	if ((fdp = p->p_fd) == NULL) {
		spin_unlock(&p->p_spin);
		return(0);
	}
	atomic_add_int(&fdp->fd_softrefs, 1);
	spin_unlock(&p->p_spin);

	/*
	 * Locate and close any matching file descriptors, replacing
	 * them with info->nfp.
	 */
	spin_lock(&fdp->fd_spin);
	for (n = 0; n < fdp->fd_nfiles; ++n) {
		if ((fp = fdp->fd_files[n].fp) == NULL)
			continue;
		if (fp->f_flag & FREVOKED) {
			++fdp->fd_closedcounter;
			fclearcache(&fdp->fd_files[n], NULL, 0);
			++fdp->fd_closedcounter;
			fhold(info->nfp);
			fdp->fd_files[n].fp = info->nfp;
			spin_unlock(&fdp->fd_spin);
			knote_fdclose(fp, fdp, n);	/* XXX */
			closef(fp, p);
			spin_lock(&fdp->fd_spin);
		}
	}
	spin_unlock(&fdp->fd_spin);
	atomic_subtract_int(&fdp->fd_softrefs, 1);
	return(0);
}
/*
 * Create a new open file structure and reserve a file descriptor
 * for the process that refers to it.
 *
 * Root creds are checked using lp, or assumed if lp is NULL.  If
 * resultfd is non-NULL then lp must also be non-NULL.  No file
 * descriptor is reserved (and no process context is needed) if
 * resultfd is NULL.
 *
 * A file pointer with a refcount of 1 is returned.  Note that the
 * file pointer is NOT associated with the descriptor.  If falloc
 * returns success, fsetfd() MUST be called to either associate the
 * file pointer or clear the reservation.
 */
int
falloc(struct lwp *lp, struct file **resultfp, int *resultfd)
{
	static struct timeval lastfail;
	static int curfail;
	struct filelist_head *head;
	struct file *fp;
	struct ucred *cred = lp ? lp->lwp_thread->td_ucred : proc0.p_ucred;
	int error;

	fp = NULL;

	/*
	 * Handle filetable full issues and root overfill.
	 */
	if (nfiles >= maxfiles - maxfilesrootres &&
	    (cred->cr_ruid != 0 || nfiles >= maxfiles)) {
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			kprintf("kern.maxfiles limit exceeded by uid %d, "
				"please see tuning(7).\n",
				cred->cr_ruid);
		}
		error = ENFILE;
		goto done;
	}

	/*
	 * Allocate a new file descriptor.
	 */
	fp = objcache_get(file_objcache, M_WAITOK);
	bzero(fp, sizeof(*fp));
	spin_init(&fp->f_spin, "falloc");
	SLIST_INIT(&fp->f_klist);
	fp->f_count = 1;
	fp->f_ops = &badfileops;
	fsetcred(fp, cred);
	atomic_add_int(&nfiles, 1);

	head = fp2filelist(fp);
	spin_lock(&head->spin);
	LIST_INSERT_HEAD(&head->list, fp, f_list);
	spin_unlock(&head->spin);

	if (resultfd) {
		if ((error = fdalloc(lp->lwp_proc, 0, resultfd)) != 0) {
			fdrop(fp);
			fp = NULL;
		}
	} else {
		error = 0;
	}
done:
	*resultfp = fp;
	return (error);
}
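/*
 * Typical pairing (illustrative sketch; see fdcheckstd() below for a
 * real instance): every successful falloc() with a resultfd must be
 * resolved with fsetfd(), either installing the fp or returning the
 * reserved slot:
 *
 *	error = falloc(lp, &fp, &fd);
 *	if (error == 0) {
 *		... initialize fp ...
 *		fsetfd(fdp, fp, fd);	(or fsetfd(fdp, NULL, fd) on failure)
 *		fdrop(fp);
 *	}
 */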
/*
 * Check for races against a file descriptor by determining that the
 * file pointer is still associated with the specified file descriptor,
 * and a close is not currently in progress.
 */
int
checkfdclosed(thread_t td, struct filedesc *fdp, int fd, struct file *fp,
	      int closedcounter)
{
	struct fdcache *fdc;
	int error;

	if (fdp->fd_closedcounter == closedcounter)
		return 0;

	if (td->td_proc && td->td_proc->p_fd == fdp) {
		for (fdc = &td->td_fdcache[0];
		     fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
			if (fdc->fd == fd && fdc->fp == fp)
				return 0;
		}
	}

	spin_lock_shared(&fdp->fd_spin);
	if ((unsigned)fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp)
		error = EBADF;
	else
		error = 0;
	spin_unlock_shared(&fdp->fd_spin);
	return (error);
}
/*
 * Associate a file pointer with a previously reserved file descriptor.
 * This function always succeeds.
 *
 * If fp is NULL, the file descriptor is returned to the pool.
 *
 * Caller must hold an exclusive spinlock on fdp->fd_spin.
 */
static void
fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd)
{
	KKASSERT((unsigned)fd < fdp->fd_nfiles);
	KKASSERT(fdp->fd_files[fd].reserved != 0);
	if (fp) {
		fhold(fp);
		fclearcache(&fdp->fd_files[fd], NULL, 0);
		fdp->fd_files[fd].fp = fp;
		fdp->fd_files[fd].reserved = 0;
	} else {
		fdp->fd_files[fd].reserved = 0;
		fdreserve_locked(fdp, fd, -1);
		fdfixup_locked(fdp, fd);
	}
}

/*
 * Like fsetfd_locked() but acquires fdp->fd_spin itself.
 */
void
fsetfd(struct filedesc *fdp, struct file *fp, int fd)
{
	spin_lock(&fdp->fd_spin);
	fsetfd_locked(fdp, fp, fd);
	spin_unlock(&fdp->fd_spin);
}
/*
 * Caller must hold an exclusive spinlock on fdp->fd_spin.
 */
static struct file *
funsetfd_locked(struct filedesc *fdp, int fd)
{
	struct file *fp;

	if ((unsigned)fd >= fdp->fd_nfiles)
		return (NULL);
	if ((fp = fdp->fd_files[fd].fp) == NULL)
		return (NULL);
	++fdp->fd_closedcounter;
	fclearcache(&fdp->fd_files[fd], NULL, 0);
	fdp->fd_files[fd].fp = NULL;
	fdp->fd_files[fd].fileflags = 0;
	++fdp->fd_closedcounter;

	fdreserve_locked(fdp, fd, -1);
	fdfixup_locked(fdp, fd);

	return (fp);
}
/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
{
	int error;

	spin_lock(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		*flagsp = fdp->fd_files[fd].fileflags;
		error = 0;
	}
	spin_unlock(&fdp->fd_spin);

	return (error);
}

/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
{
	int error;

	spin_lock(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		fdp->fd_files[fd].fileflags |= add_flags;
		error = 0;
	}
	spin_unlock(&fdp->fd_spin);

	return (error);
}

/*
 * WARNING: May not be called before initial fsetfd().
 */
int
fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
{
	int error;

	spin_lock(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		fdp->fd_files[fd].fileflags &= ~rem_flags;
		error = 0;
	}
	spin_unlock(&fdp->fd_spin);

	return (error);
}
/*
 * Set/Change/Clear the creds for a fp and synchronize the uidinfo.
 */
void
fsetcred(struct file *fp, struct ucred *ncr)
{
	struct ucred *ocr;
	struct uidinfo *uip;
	struct uidcount *pup;
	int cpu = mycpuid;
	int count;

	ocr = fp->f_cred;
	if (ocr == NULL || ncr == NULL || ocr->cr_uidinfo != ncr->cr_uidinfo) {
		if (ocr) {
			uip = ocr->cr_uidinfo;
			pup = &uip->ui_pcpu[cpu];
			atomic_add_int(&pup->pu_openfiles, -1);
			if (pup->pu_openfiles < -PUP_LIMIT ||
			    pup->pu_openfiles > PUP_LIMIT) {
				count = atomic_swap_int(&pup->pu_openfiles, 0);
				atomic_add_int(&uip->ui_openfiles, count);
			}
		}
		if (ncr) {
			uip = ncr->cr_uidinfo;
			pup = &uip->ui_pcpu[cpu];
			atomic_add_int(&pup->pu_openfiles, 1);
			if (pup->pu_openfiles < -PUP_LIMIT ||
			    pup->pu_openfiles > PUP_LIMIT) {
				count = atomic_swap_int(&pup->pu_openfiles, 0);
				atomic_add_int(&uip->ui_openfiles, count);
			}
		}
	}
	if (ncr)
		crhold(ncr);
	if (ocr)
		crfree(ocr);
	fp->f_cred = ncr;
}
/*
 * Free a file descriptor.
 */
static void
ffree(struct file *fp)
{
	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
	fsetcred(fp, NULL);
	if (fp->f_nchandle.ncp)
		cache_drop(&fp->f_nchandle);
	objcache_put(file_objcache, fp);
}
/*
 * called from init_main, initialize filedesc0 for proc0.
 */
void
fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask)
{
	p0->p_fd = fdp0;
	p0->p_fdtol = NULL;
	fdp0->fd_refcnt = 1;
	fdp0->fd_cmask = cmask;
	fdp0->fd_files = fdp0->fd_builtin_files;
	fdp0->fd_nfiles = NDFILE;
	fdp0->fd_lastfile = -1;
	spin_init(&fdp0->fd_spin, "fdinitbootstrap");
}
/*
 * Build a new filedesc structure.
 */
struct filedesc *
fdinit(struct proc *p)
{
	struct filedesc *newfdp;
	struct filedesc *fdp = p->p_fd;

	newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO);
	spin_lock(&fdp->fd_spin);
	if (fdp->fd_cdir) {
		newfdp->fd_cdir = fdp->fd_cdir;
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
	}

	/*
	 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of
	 * proc0, but should unconditionally exist in other processes.
	 */
	if (fdp->fd_rdir) {
		newfdp->fd_rdir = fdp->fd_rdir;
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	}
	if (fdp->fd_jdir) {
		newfdp->fd_jdir = fdp->fd_jdir;
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	}
	spin_unlock(&fdp->fd_spin);

	/* Create the file descriptor table. */
	newfdp->fd_refcnt = 1;
	newfdp->fd_cmask = cmask;
	newfdp->fd_files = newfdp->fd_builtin_files;
	newfdp->fd_nfiles = NDFILE;
	newfdp->fd_lastfile = -1;
	spin_init(&newfdp->fd_spin, "fdinit");

	return (newfdp);
}
/*
 * Share a filedesc structure.
 */
struct filedesc *
fdshare(struct proc *p)
{
	struct filedesc *fdp;

	fdp = p->p_fd;
	spin_lock(&fdp->fd_spin);
	fdp->fd_refcnt++;
	spin_unlock(&fdp->fd_spin);
	return (fdp);
}
/*
 * Copy a filedesc structure.
 */
int
fdcopy(struct proc *p, struct filedesc **fpp)
{
	struct filedesc *fdp = p->p_fd;
	struct filedesc *newfdp;
	struct fdnode *fdnode;
	int i;
	int ni;

	/*
	 * Certain daemons might not have file descriptors.
	 */
	if (fdp == NULL)
		return (0);

	/*
	 * Allocate the new filedesc and fd_files[] array.  This can race
	 * with operations by other threads on the fdp so we have to be
	 * careful.
	 */
	newfdp = kmalloc(sizeof(struct filedesc),
			 M_FILEDESC, M_WAITOK | M_ZERO | M_NULLOK);
	if (newfdp == NULL) {
		*fpp = NULL;
		return (-1);
	}
again:
	spin_lock(&fdp->fd_spin);
	if (fdp->fd_lastfile < NDFILE) {
		newfdp->fd_files = newfdp->fd_builtin_files;
		i = NDFILE;
	} else {
		/*
		 * We have to allocate (2^N-1) entries for our in-place
		 * binary tree.  Allow the table to shrink.
		 */
		i = fdp->fd_nfiles;
		ni = (i - 1) / 2;
		while (ni > fdp->fd_lastfile && ni > NDFILE) {
			i = ni;
			ni = (i - 1) / 2;
		}
		spin_unlock(&fdp->fd_spin);
		newfdp->fd_files = kmalloc(i * sizeof(struct fdnode),
					   M_FILEDESC, M_WAITOK | M_ZERO);

		/*
		 * Check for race, retry
		 */
		spin_lock(&fdp->fd_spin);
		if (i <= fdp->fd_lastfile) {
			spin_unlock(&fdp->fd_spin);
			kfree(newfdp->fd_files, M_FILEDESC);
			goto again;
		}
	}

	/*
	 * Dup the remaining fields. vref() and cache_hold() can be
	 * safely called while holding the read spinlock on fdp.
	 *
	 * The read spinlock on fdp is still being held.
	 *
	 * NOTE: vref and cache_hold calls for the case where the vnode
	 * or cache entry already has at least one ref may be called
	 * while holding spin locks.
	 */
	if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) {
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
	}
	/*
	 * We must check for fd_rdir here, at least for now because
	 * the init process is created before we have access to the
	 * rootvnode to take a reference to it.
	 */
	if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) {
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	}
	if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) {
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	}
	newfdp->fd_refcnt = 1;
	newfdp->fd_nfiles = i;
	newfdp->fd_lastfile = fdp->fd_lastfile;
	newfdp->fd_freefile = fdp->fd_freefile;
	newfdp->fd_cmask = fdp->fd_cmask;
	spin_init(&newfdp->fd_spin, "fdcopy");

	/*
	 * Copy the descriptor table through (i).  This also copies the
	 * allocation state.   Then go through and ref the file pointers
	 * and clean up any KQ descriptors.
	 *
	 * kq descriptors cannot be copied.  Since we haven't ref'd the
	 * copied files yet we can ignore the return value from funsetfd().
	 *
	 * The read spinlock on fdp is still being held.
	 *
	 * Be sure to clean out fdnode->tdcache, otherwise bad things will
	 * happen.
	 */
	bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode));
	for (i = 0 ; i < newfdp->fd_nfiles; ++i) {
		fdnode = &newfdp->fd_files[i];
		if (fdnode->reserved) {
			fdreserve_locked(newfdp, i, -1);
			fdnode->reserved = 0;
			fdfixup_locked(newfdp, i);
		} else if (fdnode->fp) {
			bzero(&fdnode->tdcache, sizeof(fdnode->tdcache));
			if (fdnode->fp->f_type == DTYPE_KQUEUE) {
				(void)funsetfd_locked(newfdp, i);
			} else {
				fhold(fdnode->fp);
			}
		}
	}
	spin_unlock(&fdp->fd_spin);
	*fpp = newfdp;
	return (0);
}
/*
 * Release a filedesc structure.
 *
 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE)
 */
void
fdfree(struct proc *p, struct filedesc *repl)
{
	struct filedesc *fdp;
	struct fdnode *fdnode;
	int i;
	struct filedesc_to_leader *fdtol;
	struct file *fp;
	struct vnode *vp;
	struct flock lf;

	/*
	 * Before destroying or replacing p->p_fd we must be sure to
	 * clean out the cache of the last thread, which should be
	 * curthread.
	 */
	fexitcache(curthread);

	/*
	 * Certain daemons might not have file descriptors.
	 */
	fdp = p->p_fd;
	if (fdp == NULL) {
		p->p_fd = repl;
		return;
	}

	/*
	 * Severe messing around to follow.
	 */
	spin_lock(&fdp->fd_spin);

	/* Check for special need to clear POSIX style locks */
	fdtol = p->p_fdtol;
	if (fdtol != NULL) {
		KASSERT(fdtol->fdl_refcount > 0,
			("filedesc_to_refcount botch: fdl_refcount=%d",
			 fdtol->fdl_refcount));
		if (fdtol->fdl_refcount == 1 && p->p_leader->p_advlock_flag) {
			for (i = 0; i <= fdp->fd_lastfile; ++i) {
				fdnode = &fdp->fd_files[i];
				if (fdnode->fp == NULL ||
				    fdnode->fp->f_type != DTYPE_VNODE) {
					continue;
				}
				fp = fdnode->fp;
				fhold(fp);
				spin_unlock(&fdp->fd_spin);

				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = (struct vnode *)fp->f_data;
				VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
					    F_UNLCK, &lf, F_POSIX);
				fdrop(fp);
				spin_lock(&fdp->fd_spin);
			}
		}
retry:
		if (fdtol->fdl_refcount == 1) {
			if (fdp->fd_holdleaderscount > 0 &&
			    p->p_leader->p_advlock_flag) {
				/*
				 * close() or do_dup() has cleared a reference
				 * in a shared file descriptor table.
				 */
				fdp->fd_holdleaderswakeup = 1;
				ssleep(&fdp->fd_holdleaderscount,
				       &fdp->fd_spin, 0, "fdlhold", 0);
				goto retry;
			}
			if (fdtol->fdl_holdcount > 0) {
				/*
				 * Ensure that fdtol->fdl_leader
				 * remains valid in closef().
				 */
				fdtol->fdl_wakeup = 1;
				ssleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0);
				goto retry;
			}
		}
		fdtol->fdl_refcount--;
		if (fdtol->fdl_refcount == 0 &&
		    fdtol->fdl_holdcount == 0) {
			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
		} else {
			fdtol = NULL;
		}
		p->p_fdtol = NULL;
		if (fdtol != NULL) {
			spin_unlock(&fdp->fd_spin);
			kfree(fdtol, M_FILEDESC_TO_LEADER);
			spin_lock(&fdp->fd_spin);
		}
	}
	if (--fdp->fd_refcnt > 0) {
		spin_unlock(&fdp->fd_spin);
		spin_lock(&p->p_spin);
		p->p_fd = repl;
		spin_unlock(&p->p_spin);
		return;
	}

	/*
	 * Even though we are the last reference to the structure allproc
	 * scans may still reference the structure.  Maintain proper
	 * locks until we can replace p->p_fd.
	 *
	 * Also note that kqueue's closef still needs to reference the
	 * fdp via p->p_fd, so we have to close the descriptors before
	 * we replace p->p_fd.
	 */
	for (i = 0; i <= fdp->fd_lastfile; ++i) {
		if (fdp->fd_files[i].fp) {
			fp = funsetfd_locked(fdp, i);
			if (fp) {
				spin_unlock(&fdp->fd_spin);
				if (SLIST_FIRST(&fp->f_klist))
					knote_fdclose(fp, fdp, i);
				closef(fp, p);
				spin_lock(&fdp->fd_spin);
			}
		}
	}
	spin_unlock(&fdp->fd_spin);

	/*
	 * Interlock against an allproc scan operations (typically frevoke).
	 */
	spin_lock(&p->p_spin);
	p->p_fd = repl;
	spin_unlock(&p->p_spin);

	/*
	 * Wait for any softrefs to go away.  This race rarely occurs so
	 * we can use a non-critical-path style poll/sleep loop.  The
	 * race only occurs against allproc scans.
	 *
	 * No new softrefs can occur with the fdp disconnected from the
	 * process.
	 */
	if (fdp->fd_softrefs) {
		kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid);
		while (fdp->fd_softrefs)
			tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1);
	}

	if (fdp->fd_files != fdp->fd_builtin_files)
		kfree(fdp->fd_files, M_FILEDESC);
	if (fdp->fd_cdir) {
		cache_drop(&fdp->fd_ncdir);
		vrele(fdp->fd_cdir);
	}
	if (fdp->fd_rdir) {
		cache_drop(&fdp->fd_nrdir);
		vrele(fdp->fd_rdir);
	}
	if (fdp->fd_jdir) {
		cache_drop(&fdp->fd_njdir);
		vrele(fdp->fd_jdir);
	}
	kfree(fdp, M_FILEDESC);
}
/*
 * Retrieve and reference the file pointer associated with a descriptor.
 *
 * td must be the current thread.
 */
struct file *
holdfp(thread_t td, int fd, int flag)
{
	struct file *fp;

	fp = _holdfp_cache(td, fd);
	if (fp) {
		if ((fp->f_flag & flag) == 0 && flag != -1) {
			fdrop(fp);
			fp = NULL;
		}
	}
	return fp;
}
/*
 * holdsock() - load the struct file pointer associated
 * with a socket into *fpp.  If an error occurs, non-zero
 * will be returned and *fpp will be set to NULL.
 *
 * td must be the current thread.
 */
int
holdsock(thread_t td, int fd, struct file **fpp)
{
	struct file *fp;
	int error;

	fp = _holdfp_cache(td, fd);
	if (fp) {
		if (fp->f_type != DTYPE_SOCKET) {
			fdrop(fp);
			fp = NULL;
			error = ENOTSOCK;
		} else {
			error = 0;
		}
	} else {
		error = EBADF;
	}
	*fpp = fp;

	return (error);
}
/*
 * Convert a user file descriptor to a held file pointer.
 *
 * td must be the current thread.
 */
int
holdvnode(thread_t td, int fd, struct file **fpp)
{
	struct file *fp;
	int error;

	fp = _holdfp_cache(td, fd);
	if (fp) {
		if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
			fdrop(fp);
			fp = NULL;
			error = EINVAL;
		} else {
			error = 0;
		}
	} else {
		error = EBADF;
	}
	*fpp = fp;

	return (error);
}
/*
 * For setugid programs, we don't want people to use that setugidness
 * to generate error messages which write to a file which would
 * otherwise be off-limits to the process.
 *
 * This is a gross hack to plug the hole.  A better solution would involve
 * a special vop or other form of generalized access control mechanism.
 * We go ahead and just reject all procfs file system accesses as
 * dangerous.
 *
 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
 * sufficient.  We also don't check for setugidness since we know we are.
 */
static int
is_unsafe(struct file *fp)
{
	if (fp->f_type == DTYPE_VNODE &&
	    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
		return (1);
	return (0);
}
/*
 * Make this setugid thing safe, if at all possible.
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
void
setugidsafety(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	int i;
	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;
	/*
	 * note: fdp->fd_files may be reallocated out from under us while
	 * we are blocked in a close.  Be careful!
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		struct file *fp;

		if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) {
			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			if ((fp = funsetfd_locked(fdp, i)) != NULL) {
				knote_fdclose(fp, fdp, i);
				closef(fp, p);
			}
		}
	}
}
/*
 * Close all CLOEXEC files on exec.
 *
 * Only a single thread remains for the current process.
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
void
fdcloseexec(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;
	/*
	 * We cannot cache fd_files since operations may block and rip
	 * them out from under us.
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		struct file *fp;

		if (fdp->fd_files[i].fp != NULL &&
		    (fdp->fd_files[i].fileflags & UF_EXCLOSE)) {
			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 *
			 * (funsetfd*() also clears the fd cache)
			 */
			if ((fp = funsetfd_locked(fdp, i)) != NULL) {
				knote_fdclose(fp, fdp, i);
				closef(fp, p);
			}
		}
	}
}
/*
 * It is unsafe for set[ug]id processes to be started with file
 * descriptors 0..2 closed, as these descriptors are given implicit
 * significance in the Standard C library.  fdcheckstd() will create a
 * descriptor referencing /dev/null for each of stdin, stdout, and
 * stderr that is not already open.
 *
 * NOT MPSAFE - calls falloc, vn_open, etc
 */
int
fdcheckstd(struct lwp *lp)
{
	struct nlookupdata nd;
	struct filedesc *fdp;
	struct file *fp;
	int retval;
	int i, error, flags, devnull;

	fdp = lp->lwp_proc->p_fd;
	if (fdp == NULL)
		return (0);
	devnull = -1;
	error = 0;
	for (i = 0; i < 3; i++) {
		if (fdp->fd_files[i].fp != NULL)
			continue;
		if (devnull < 0) {
			if ((error = falloc(lp, &fp, &devnull)) != 0)
				break;

			error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
					     NLC_FOLLOW|NLC_LOCKVP);
			flags = FREAD | FWRITE;
			if (error == 0)
				error = vn_open(&nd, fp, flags, 0);
			if (error == 0)
				fsetfd(fdp, fp, devnull);
			else
				fsetfd(fdp, NULL, devnull);
			fdrop(fp);
			nlookup_done(&nd);
			if (error)
				break;
			KKASSERT(i == devnull);
		} else {
			error = kern_dup(DUP_FIXED, devnull, i, &retval);
			if (error != 0)
				break;
		}
	}
	return (error);
}
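/*
 * Userland sketch (not kernel code) of the hazard fdcheckstd() guards
 * against: if fd 1 is closed when a set[ug]id program starts, its first
 * open() lands on descriptor 1 and later stdio output corrupts that
 * file.  The "victim" path is hypothetical.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	close(1);				/* stdout gone */
	int fd = open("victim", O_RDWR);	/* becomes fd 1 */
	printf("diagnostic\n");			/* writes into "victim" */
	return (fd < 0);
}
#endif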
/*
 * Internal form of close.
 * Decrement reference count on file structure.
 * Note: td and/or p may be NULL when closing a file
 * that was being passed in a message.
 *
 * MPALMOSTSAFE - acquires mplock for VOP operations
 */
int
closef(struct file *fp, struct proc *p)
{
	struct vnode *vp;
	struct flock lf;
	struct filedesc_to_leader *fdtol;

	if (fp == NULL)
		return (0);
	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor.
	 */
	if (p != NULL && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
	) {
		if (p->p_leader->p_advlock_flag) {
			lf.l_whence = SEEK_SET;
			lf.l_start = 0;
			lf.l_len = 0;
			lf.l_type = F_UNLCK;
			vp = (struct vnode *)fp->f_data;
			VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
				    &lf, F_POSIX);
		}
		fdtol = p->p_fdtol;
		if (fdtol != NULL) {
			lwkt_gettoken(&p->p_token);

			/*
			 * Handle special case where file descriptor table
			 * is shared between multiple process leaders.
			 */
			for (fdtol = fdtol->fdl_next;
			     fdtol != p->p_fdtol;
			     fdtol = fdtol->fdl_next) {
				if (fdtol->fdl_leader->p_advlock_flag == 0)
					continue;
				fdtol->fdl_holdcount++;
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = (struct vnode *)fp->f_data;
				VOP_ADVLOCK(vp, (caddr_t)fdtol->fdl_leader,
					    F_UNLCK, &lf, F_POSIX);
				fdtol->fdl_holdcount--;
				if (fdtol->fdl_holdcount == 0 &&
				    fdtol->fdl_wakeup != 0) {
					fdtol->fdl_wakeup = 0;
					wakeup(fdtol);
				}
			}
			lwkt_reltoken(&p->p_token);
		}
	}
	return (fdrop(fp));
}
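/*
 * Userland sketch (not kernel code) of the POSIX record-locking rule
 * implemented above: closing ANY descriptor for a file drops all of the
 * process's POSIX locks on it, even locks taken through a different
 * descriptor.  The "lockfile" path is hypothetical.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };
	int fd1 = open("lockfile", O_RDWR | O_CREAT, 0644);
	int fd2 = dup(fd1);

	fcntl(fd1, F_SETLK, &fl);	/* lock acquired via fd1 */
	close(fd2);			/* lock released anyway */
	return (0);
}
#endif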
/*
 * fhold() can only be called if f_count is already at least 1 (i.e. the
 * caller of fhold() already has a reference to the file pointer in some
 * manner or other).
 *
 * Atomic ops are used for incrementing and decrementing f_count before
 * the 1->0 transition.  The f_count 1->0 transition is special, see the
 * comment in fdrop().
 */
void
fhold(struct file *fp)
{
	/* 0->1 transition will never work */
	KASSERT(fp->f_count > 0, ("fhold: invalid f_count %d", fp->f_count));
	atomic_add_int(&fp->f_count, 1);
}
/*
 * fdrop() - drop a reference to a descriptor
 */
int
fdrop(struct file *fp)
{
	struct flock lf;
	struct vnode *vp;
	int error, do_free = 0;

	/*
	 * NOTE:
	 * Simple atomic_fetchadd_int(f_count, -1) here will cause use-
	 * after-free or double free (due to f_count 0->1 transition), if
	 * fhold() is called on the fps found through filehead iteration.
	 */
	for (;;) {
		int count = fp->f_count;

		KASSERT(count > 0, ("fdrop: invalid f_count %d", count));
		if (count == 1) {
			struct filelist_head *head = fp2filelist(fp);

			/*
			 * About to drop the last reference, hold the
			 * filehead spin lock and drop it, so that no
			 * one could see this fp through filehead anymore,
			 * let alone fhold() this fp.
			 */
			spin_lock(&head->spin);
			if (atomic_cmpset_int(&fp->f_count, count, 0)) {
				LIST_REMOVE(fp, f_list);
				spin_unlock(&head->spin);
				atomic_subtract_int(&nfiles, 1);
				do_free = 1;	/* free this fp */
				break;
			}
			spin_unlock(&head->spin);
			/* retry */
		} else if (atomic_cmpset_int(&fp->f_count, count, count - 1)) {
			break;
		}
		/* retry */
	}
	if (!do_free)
		return (0);

	KKASSERT(SLIST_FIRST(&fp->f_klist) == NULL);
	/*
	 * The last reference has gone away, we own the fp structure free
	 * and clear.
	 */
	if (fp->f_count < 0)
		panic("fdrop: count < 0");
	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
	) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		vp = (struct vnode *)fp->f_data;
		VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
	}
	if (fp->f_ops != &badfileops)
		error = fo_close(fp);
	else
		error = 0;
	ffree(fp);
	return (error);
}
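/*
 * Userland model (assumption-laden sketch, C11 atomics instead of the
 * kernel atomic ops) of the refcount loop above: the 1->0 transition is
 * done with a compare-and-set while the list lock is held, so a
 * concurrent filehead iterator can never fhold() a dying fp.
 * model_drop() is a hypothetical name.
 */
#if 0
#include <stdatomic.h>

static int
model_drop(atomic_int *count)
{
	for (;;) {
		int c = atomic_load(count);

		if (c == 1) {
			/* the real code takes the filelist spinlock here */
			if (atomic_compare_exchange_strong(count, &c, 0))
				return (1);	/* caller frees the object */
			/* lost a race against fhold(), retry */
		} else if (atomic_compare_exchange_strong(count, &c, c - 1)) {
			return (0);
		}
	}
}
#endif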
/*
 * Apply an advisory lock on a file descriptor.
 *
 * Just attempt to get a record lock of the requested type on
 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
 */
int
sys_flock(struct flock_args *uap)
{
	thread_t td = curthread;
	struct file *fp;
	struct vnode *vp;
	struct flock lf;
	int error;

	if ((fp = holdfp(td, uap->fd, -1)) == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_VNODE) {
		error = EOPNOTSUPP;
		goto done;
	}
	vp = (struct vnode *)fp->f_data;
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	if (uap->how & LOCK_UN) {
		lf.l_type = F_UNLCK;
		atomic_clear_int(&fp->f_flag, FHASLOCK); /* race ok */
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
		goto done;
	}
	if (uap->how & LOCK_EX) {
		lf.l_type = F_WRLCK;
	} else if (uap->how & LOCK_SH) {
		lf.l_type = F_RDLCK;
	} else {
		error = EBADF;
		goto done;
	}
	if (uap->how & LOCK_NB)
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0);
	else
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT);
	atomic_set_int(&fp->f_flag, FHASLOCK);	/* race ok */
done:
	fdrop(fp);
	return (error);
}
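/*
 * Userland sketch (not kernel code) of the whole-file advisory lock
 * semantics implemented above.  The "lockfile" path is hypothetical.
 */
#if 0
#include <sys/file.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("lockfile", O_RDWR | O_CREAT, 0644);

	if (flock(fd, LOCK_EX | LOCK_NB) < 0)
		return (1);	/* EWOULDBLOCK: already locked elsewhere */
	/* ... critical section ... */
	flock(fd, LOCK_UN);
	close(fd);
	return (0);
}
#endif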
/*
 * File Descriptor pseudo-device driver (/dev/fd/).
 *
 * Opening minor device N dup()s the file (if any) connected to file
 * descriptor N belonging to the calling process.  Note that this driver
 * consists of only the ``open()'' routine, because all subsequent
 * references to this file will be direct to the other driver.
 */
static int
fdopen(struct dev_open_args *ap)
{
	thread_t td = curthread;

	KKASSERT(td->td_lwp != NULL);

	/*
	 * XXX Kludge: set curlwp->lwp_dupfd to contain the value of the
	 * file descriptor being sought for duplication.  The error
	 * return ensures that the vnode for this device will be released
	 * by vn_open.  Open will detect this special error and take the
	 * actions in dupfdopen below.  Other callers of vn_open or VOP_OPEN
	 * will simply report the error.
	 */
	td->td_lwp->lwp_dupfd = minor(ap->a_head.a_dev);
	return (ENODEV);
}
/*
 * The caller has reserved the file descriptor dfd for us.  On success we
 * must fsetfd() it.  On failure the caller will clean it up.
 */
int
dupfdopen(thread_t td, int dfd, int sfd, int mode, int error)
{
	struct filedesc *fdp;
	struct file *wfp;
	struct file *xfp;
	int werror;

	if ((wfp = holdfp(td, sfd, -1)) == NULL)
		return (EBADF);

	/*
	 * Close a revoke/dup race.  Duping a descriptor marked as revoked
	 * will dup a dummy descriptor instead of the real one.
	 */
	if (wfp->f_flag & FREVOKED) {
		kprintf("Warning: attempt to dup() a revoked descriptor\n");
		fdrop(wfp);
		wfp = NULL;
		werror = falloc(NULL, &wfp, NULL);
		if (werror)
			return (werror);
	}

	fdp = td->td_proc->p_fd;

	/*
	 * There are two cases of interest here.
	 *
	 * For ENODEV simply dup sfd to file descriptor dfd and return.
	 *
	 * For ENXIO steal away the file structure from sfd and store it
	 * in dfd.  sfd is effectively closed by this operation.
	 *
	 * Any other error code is just returned.
	 */
	switch (error) {
	case ENODEV:
		/*
		 * Check that the mode the file is being opened for is a
		 * subset of the mode of the existing descriptor.
		 */
		if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
			error = EACCES;
			break;
		}
		spin_lock(&fdp->fd_spin);
		fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
		fsetfd_locked(fdp, wfp, dfd);
		spin_unlock(&fdp->fd_spin);
		error = 0;
		break;
	case ENXIO:
		/*
		 * Steal away the file pointer from sfd and stuff it into dfd.
		 */
		spin_lock(&fdp->fd_spin);
		fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
		fsetfd_locked(fdp, wfp, dfd);
		if ((xfp = funsetfd_locked(fdp, sfd)) != NULL) {
			spin_unlock(&fdp->fd_spin);
			fdrop(xfp);
		} else {
			spin_unlock(&fdp->fd_spin);
		}
		error = 0;
		break;
	default:
		break;
	}
	fdrop(wfp);
	return (error);
}
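/*
 * Userland sketch (not kernel code): via the ENODEV path above, opening
 * /dev/fd/N behaves like dup(N) for the calling process.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int a = open("/dev/fd/0", O_RDONLY);	/* effectively dup(0) */
	int b = dup(0);				/* same underlying file */

	close(a);
	close(b);
	return (0);
}
#endif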
/*
 * NOT MPSAFE - I think these refer to a common file descriptor table
 * and we need to spinlock that to link fdtol in.
 */
struct filedesc_to_leader *
filedesc_to_leader_alloc(struct filedesc_to_leader *old,
			 struct proc *leader)
{
	struct filedesc_to_leader *fdtol;

	fdtol = kmalloc(sizeof(struct filedesc_to_leader),
			M_FILEDESC_TO_LEADER, M_WAITOK | M_ZERO);
	fdtol->fdl_refcount = 1;
	fdtol->fdl_holdcount = 0;
	fdtol->fdl_wakeup = 0;
	fdtol->fdl_leader = leader;
	if (old != NULL) {
		fdtol->fdl_next = old->fdl_next;
		fdtol->fdl_prev = old;
		old->fdl_next = fdtol;
		fdtol->fdl_next->fdl_prev = fdtol;
	} else {
		fdtol->fdl_next = fdtol;
		fdtol->fdl_prev = fdtol;
	}
	return (fdtol);
}
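/*
 * Illustrative sketch (hypothetical helper, disabled): the invariant of
 * the circular doubly-linked fdtol ring built above, whether the node
 * was spliced in after 'old' or initialized as a singleton ring.
 */
#if 0
static void
fdtol_check_ring(struct filedesc_to_leader *fdtol)
{
	struct filedesc_to_leader *scan = fdtol;

	do {
		KKASSERT(scan->fdl_next->fdl_prev == scan);
		KKASSERT(scan->fdl_prev->fdl_next == scan);
		scan = scan->fdl_next;
	} while (scan != fdtol);
}
#endif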
/*
 * Scan all file pointers in the system.  The callback is made with
 * the relevant filelist bucket's spinlock held exclusively.
 */
void
allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
{
	int i;

	for (i = 0; i < NFILELIST_HEADS; ++i) {
		struct filelist_head *head = &filelist_heads[i];
		struct file *fp;

		spin_lock(&head->spin);
		LIST_FOREACH(fp, &head->list, f_list) {
			int res;

			res = callback(fp, data);
			if (res < 0)
				break;
		}
		spin_unlock(&head->spin);
	}
}
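/*
 * Example callback (hypothetical, disabled): count vnode-backed files.
 * Returning a negative value from the callback stops the scan of the
 * current bucket.
 */
#if 0
static int
count_vnode_files(struct file *fp, void *data)
{
	int *counterp = data;

	if (fp->f_type == DTYPE_VNODE)
		++*counterp;
	return (0);
}

/* usage: allfiles_scan_exclusive(count_vnode_files, &counter); */
#endif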
/*
 * Get file structures.
 *
 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe)
 */
struct sysctl_kern_file_info {
	int count;
	int error;
	struct sysctl_req *req;
};
static int sysctl_kern_file_callback(struct proc *p, void *data);

static int
sysctl_kern_file(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_kern_file_info info;
	/*
	 * Note: because the number of file descriptors is calculated
	 * in different ways for sizing vs returning the data,
	 * there is information leakage from the first loop.  However,
	 * it is of a similar order of magnitude to the leakage from
	 * global system statistics such as kern.openfiles.
	 *
	 * When just doing a count, note that we cannot just count
	 * the elements and add f_count via the filehead list because
	 * threaded processes share their descriptor table and f_count might
	 * still be '1' in that case.
	 *
	 * Since the SYSCTL op can block, we must hold the process to
	 * prevent it being ripped out from under us either in the
	 * file descriptor loop or in the greater LIST_FOREACH.  The
	 * process may be in varying states of disrepair.  If the process
	 * is in SZOMB we may have caught it just as it is being removed
	 * from the allproc list; we must skip it in that case to maintain
	 * an unbroken chain through the allproc list.
	 */
	info.count = 0;
	info.error = 0;
	info.req = req;
	allproc_scan(sysctl_kern_file_callback, &info, 0);
	/*
	 * When just calculating the size, overestimate a bit to try to
	 * prevent system activity from causing the buffer-fill call
	 * to fail later on.
	 */
	if (req->oldptr == NULL) {
		info.count = (info.count + 16) + (info.count / 10);
		info.error = SYSCTL_OUT(req, NULL,
					info.count * sizeof(struct kinfo_file));
	}
	return (info.error);
}
static int
sysctl_kern_file_callback(struct proc *p, void *data)
{
	struct sysctl_kern_file_info *info = data;
	struct kinfo_file kf;
	struct filedesc *fdp;
	struct file *fp;
	uid_t uid;
	int n;

	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return (0);
	if (PRISON_CHECK(info->req->td->td_ucred, p->p_ucred) == 0)
		return (0);
	/*
	 * Softref the fdp to prevent it from being destroyed
	 */
	spin_lock(&p->p_spin);
	if ((fdp = p->p_fd) == NULL) {
		spin_unlock(&p->p_spin);
		return (0);
	}
	atomic_add_int(&fdp->fd_softrefs, 1);
	spin_unlock(&p->p_spin);
	/*
	 * The fdp's own spinlock prevents the contents from being
	 * modified.
	 */
	spin_lock_shared(&fdp->fd_spin);
	for (n = 0; n < fdp->fd_nfiles; ++n) {
		if ((fp = fdp->fd_files[n].fp) == NULL)
			continue;
		if (info->req->oldptr == NULL) {
			++info->count;
		} else {
			uid = p->p_ucred ? p->p_ucred->cr_uid : -1;
			kcore_make_file(&kf, fp, p->p_pid, uid, n);
			spin_unlock_shared(&fdp->fd_spin);
			info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf));
			spin_lock_shared(&fdp->fd_spin);
			if (info->error)
				break;
		}
	}
	spin_unlock_shared(&fdp->fd_spin);
	atomic_subtract_int(&fdp->fd_softrefs, 1);
	if (info->error)
		return (-1);
	return (0);
}
SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
	    0, 0, sysctl_kern_file, "S,file", "Entire file table");
SYSCTL_INT(_kern, OID_AUTO, minfilesperproc, CTLFLAG_RW,
	   &minfilesperproc, 0, "Minimum files allowed open per process");
SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
	   &maxfilesperproc, 0, "Maximum files allowed open per process");
SYSCTL_INT(_kern, OID_AUTO, maxfilesperuser, CTLFLAG_RW,
	   &maxfilesperuser, 0, "Maximum files allowed open per user");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
	   &maxfiles, 0, "Maximum number of files");

SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
	   &maxfilesrootres, 0, "Descriptors reserved for root use");

SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
	   &nfiles, 0, "System-wide number of open files");
static void
fildesc_drvinit(void *unused)
{
	int fd;

	for (fd = 0; fd < NUMFDESC; fd++) {
		make_dev(&fildesc_ops, fd,
			 UID_BIN, GID_BIN, 0666, "fd/%d", fd);
	}

	make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
	make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
	make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
}
struct fileops badfileops = {
	.fo_read = badfo_readwrite,
	.fo_write = badfo_readwrite,
	.fo_ioctl = badfo_ioctl,
	.fo_kqfilter = badfo_kqfilter,
	.fo_stat = badfo_stat,
	.fo_close = badfo_close,
	.fo_shutdown = badfo_shutdown
};
int
badfo_ioctl(struct file *fp, u_long com, caddr_t data,
	    struct ucred *cred, struct sysmsg *msgv)
{
	return (EBADF);
}
/*
 * Must return an error to prevent registration, typically
 * due to a revoked descriptor (file_filtops assigned).
 */
int
badfo_kqfilter(struct file *fp, struct knote *kn)
{
	return (EOPNOTSUPP);
}
int
badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	return (EBADF);
}
int
badfo_close(struct file *fp)
{
	return (EBADF);
}
int
badfo_shutdown(struct file *fp, int how)
{
	return (EBADF);
}
int
nofo_shutdown(struct file *fp, int how)
{
	return (EOPNOTSUPP);
}
SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR,
	fildesc_drvinit, NULL);
static void
filelist_heads_init(void *arg __unused)
{
	int i;

	for (i = 0; i < NFILELIST_HEADS; ++i) {
		struct filelist_head *head = &filelist_heads[i];

		spin_init(&head->spin, "filehead_spin");
		LIST_INIT(&head->list);
	}
}
SYSINIT(filelistheads, SI_BOOT1_LOCK, SI_ORDER_ANY,
	filelist_heads_init, NULL);
static void
file_objcache_init(void *dummy __unused)
{
	file_objcache = objcache_create("file", maxfiles, maxfiles / 8,
					NULL, NULL, NULL, /* TODO: ctor/dtor */
					objcache_malloc_alloc,
					objcache_malloc_free,
					&file_malloc_args);
}
SYSINIT(fpobjcache, SI_BOOT2_POST_SMP, SI_ORDER_ANY, file_objcache_init, NULL);