sys/kern/kern_descrip.c (dragonfly.git)
1 /*
2 * Copyright (c) 2005-2018 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Jeffrey Hsu and Matthew Dillon.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
35 * Copyright (c) 1982, 1986, 1989, 1991, 1993
36 * The Regents of the University of California. All rights reserved.
37 * (c) UNIX System Laboratories, Inc.
38 * All or some portions of this file are derived from material licensed
39 * to the University of California by American Telephone and Telegraph
40 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
41 * the permission of UNIX System Laboratories, Inc.
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. Neither the name of the University nor the names of its contributors
52 * may be used to endorse or promote products derived from this software
53 * without specific prior written permission.
55 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 * SUCH DAMAGE.
67 * @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94
68 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/malloc.h>
74 #include <sys/sysproto.h>
75 #include <sys/conf.h>
76 #include <sys/device.h>
77 #include <sys/file.h>
78 #include <sys/filedesc.h>
79 #include <sys/kernel.h>
80 #include <sys/sysctl.h>
81 #include <sys/vnode.h>
82 #include <sys/proc.h>
83 #include <sys/nlookup.h>
84 #include <sys/stat.h>
85 #include <sys/filio.h>
86 #include <sys/fcntl.h>
87 #include <sys/unistd.h>
88 #include <sys/resourcevar.h>
89 #include <sys/event.h>
90 #include <sys/kern_syscall.h>
91 #include <sys/kcore.h>
92 #include <sys/kinfo.h>
93 #include <sys/un.h>
94 #include <sys/objcache.h>
96 #include <vm/vm.h>
97 #include <vm/vm_extern.h>
99 #include <sys/thread2.h>
100 #include <sys/file2.h>
101 #include <sys/spinlock2.h>
103 static int fdalloc_locked(struct proc *p, struct filedesc *fdp,
104 int want, int *result);
105 static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd);
106 static void fdreserve_locked (struct filedesc *fdp, int fd0, int incr);
107 static struct file *funsetfd_locked (struct filedesc *fdp, int fd);
108 static void ffree(struct file *fp);
110 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
111 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
112 "file desc to leader structures");
113 MALLOC_DEFINE(M_FILE, "file", "Open file structure");
114 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
116 static struct krate krate_uidinfo = { .freq = 1 };
118 static d_open_t fdopen;
119 #define NUMFDESC 64
121 #define CDEV_MAJOR 22
122 static struct dev_ops fildesc_ops = {
123 { "FD", 0, 0 },
124 .d_open = fdopen,
128 * Descriptor management.
130 #ifndef NFILELIST_HEADS
131 #define NFILELIST_HEADS 257 /* prime number */
132 #endif
134 struct filelist_head {
135 struct spinlock spin;
136 struct filelist list;
137 } __cachealign;
139 static struct filelist_head filelist_heads[NFILELIST_HEADS];
141 static int nfiles; /* actual number of open files */
142 extern int cmask;
144 struct lwkt_token revoke_token = LWKT_TOKEN_INITIALIZER(revoke_token);
146 static struct objcache *file_objcache;
148 static struct objcache_malloc_args file_malloc_args = {
149 .objsize = sizeof(struct file),
150 .mtype = M_FILE
154 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared.
156 * must be called with fdp->fd_spin exclusively held
158 static __inline
159 void
160 fdfixup_locked(struct filedesc *fdp, int fd)
162 if (fd < fdp->fd_freefile) {
163 fdp->fd_freefile = fd;
165 while (fdp->fd_lastfile >= 0 &&
166 fdp->fd_files[fdp->fd_lastfile].fp == NULL &&
167 fdp->fd_files[fdp->fd_lastfile].reserved == 0
169 --fdp->fd_lastfile;
174 * Clear the fd thread caches for this fdnode.
176 * If match_fdc is NULL, all thread caches of fdn will be cleared.
177 * The caller must hold fdp->fd_spin exclusively. The threads caching
178 * the descriptor do not have to be the current thread. The (status)
179 * argument is ignored.
181 * If match_fdc is not NULL, only the match_fdc's cache will be cleared.
182 * The caller must hold fdp->fd_spin shared and match_fdc must match a
183 * fdcache entry in curthread. match_fdc has been locked by the caller
184 * and had the specified (status).
186 * Since we are matching against a fp in the fdp (which must still be present
187 * at this time), fp will have at least two refs on any match and we can
188 * decrement the count trivially.
190 static
191 void
192 fclearcache(struct fdnode *fdn, struct fdcache *match_fdc, int status)
194 struct fdcache *fdc;
195 struct file *fp;
196 int i;
199 * match_fdc == NULL We are cleaning out all tdcache entries
200 * for the fdn and hold fdp->fd_spin exclusively.
201 * This can race against the target threads
202 * cleaning out specific entries.
204 * match_fdc != NULL We are cleaning out a specific tdcache
205 * entry on behalf of the owning thread
206 * and hold fdp->fd_spin shared. The thread
207 * has already locked the entry. This cannot
208 * race.
210 fp = fdn->fp;
211 for (i = 0; i < NTDCACHEFD; ++i) {
212 if ((fdc = fdn->tdcache[i]) == NULL)
213 continue;
216 * If match_fdc is non-NULL we are being asked to
217 * clear a specific fdc owned by curthread. There must
218 * be exactly one match. The caller has already locked
219 * the cache entry and will dispose of the lock after
220 * we return.
222 * Since we also have a shared lock on fdp, we
223 * can do this without atomic ops.
225 if (match_fdc) {
226 if (fdc != match_fdc)
227 continue;
228 fdn->tdcache[i] = NULL;
229 KASSERT(fp == fdc->fp,
230 ("fclearcache(1): fp mismatch %p/%p\n",
231 fp, fdc->fp));
232 fdc->fp = NULL;
233 fdc->fd = -1;
236 * status can be 0 or 2. If 2 the ref is borrowed,
237 * if 0 the ref is not borrowed and we have to drop
238 * it.
240 if (status == 0)
241 atomic_add_int(&fp->f_count, -1);
242 fdn->isfull = 0; /* heuristic */
243 return;
247 * Otherwise we hold an exclusive spin-lock and can only
248 * race thread consumers borrowing cache entries.
250 * Acquire the lock and dispose of the entry. We have to
251 * spin until we get the lock.
253 for (;;) {
254 status = atomic_swap_int(&fdc->locked, 1);
255 if (status == 1) { /* foreign lock, retry */
256 cpu_pause();
257 continue;
259 fdn->tdcache[i] = NULL;
260 KASSERT(fp == fdc->fp,
261 ("fclearcache(2): fp mismatch %p/%p\n",
262 fp, fdc->fp));
263 fdc->fp = NULL;
264 fdc->fd = -1;
265 if (status == 0)
266 atomic_add_int(&fp->f_count, -1);
267 fdn->isfull = 0; /* heuristic */
268 atomic_swap_int(&fdc->locked, 0);
269 break;
272 KKASSERT(match_fdc == NULL);
276 * Retrieve the fp for the specified fd given the specified file descriptor
277 * table. The fdp does not have to be owned by the current process.
278 * If flag != -1, fp->f_flag must contain at least one of the flag bits.
280 * This function is not able to cache the fp.
282 struct file *
283 holdfp_fdp(struct filedesc *fdp, int fd, int flag)
285 struct file *fp;
287 spin_lock_shared(&fdp->fd_spin);
288 if (((u_int)fd) < fdp->fd_nfiles) {
289 fp = fdp->fd_files[fd].fp; /* can be NULL */
290 if (fp) {
291 if ((fp->f_flag & flag) == 0 && flag != -1) {
292 fp = NULL;
293 } else {
294 fhold(fp);
297 } else {
298 fp = NULL;
300 spin_unlock_shared(&fdp->fd_spin);
302 return fp;
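/*
 * Illustrative calling pattern (a sketch, not a verbatim caller from this
 * file): the returned fp, if any, carries a ref which the caller must
 * release with fdrop().
 *
 *	struct file *fp;
 *
 *	fp = holdfp_fdp(fdp, fd, FREAD);	(or -1 for any access mode)
 *	if (fp != NULL) {
 *		... operate on fp ...
 *		fdrop(fp);
 *	}
 */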
305 struct file *
306 holdfp_fdp_locked(struct filedesc *fdp, int fd, int flag)
308 struct file *fp;
310 if (((u_int)fd) < fdp->fd_nfiles) {
311 fp = fdp->fd_files[fd].fp; /* can be NULL */
312 if (fp) {
313 if ((fp->f_flag & flag) == 0 && flag != -1) {
314 fp = NULL;
315 } else {
316 fhold(fp);
319 } else {
320 fp = NULL;
322 return fp;
326 * Acquire the fp for the specified file descriptor, using the thread
327 * cache if possible and caching it if possible.
329 * td must be the current thread.
331 static
332 struct file *
333 _holdfp_cache(thread_t td, int fd)
335 struct filedesc *fdp;
336 struct fdcache *fdc;
337 struct fdcache *best;
338 struct fdnode *fdn;
339 struct file *fp;
340 int status;
341 int delta;
342 int i;
345 * Fast path
347 for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
348 if (fdc->fd != fd || fdc->fp == NULL)
349 continue;
350 status = atomic_swap_int(&fdc->locked, 1);
353 * If someone else has locked our cache entry they are in
354 * the middle of clearing it, skip the entry.
356 if (status == 1)
357 continue;
360 * We have locked the entry, but if it no longer matches
361 * restore the previous state (0 or 2) and skip the entry.
363 if (fdc->fd != fd || fdc->fp == NULL) {
364 atomic_swap_int(&fdc->locked, status);
365 continue;
369 * We have locked a valid entry. We can borrow the ref
370 * for a mode 0 entry. We can get a valid fp for a mode
371 * 2 entry but not borrow the ref.
373 if (status == 0) {
374 fp = fdc->fp;
375 fdc->lru = ++td->td_fdcache_lru;
376 atomic_swap_int(&fdc->locked, 2);
378 return fp;
380 if (status == 2) {
381 fp = fdc->fp;
382 fhold(fp);
383 fdc->lru = ++td->td_fdcache_lru;
384 atomic_swap_int(&fdc->locked, 2);
386 return fp;
388 KKASSERT(0);
392 * Lookup the descriptor the slow way. This can contend against
393 * modifying operations in a multi-threaded environment and cause
394 * cache line ping ponging otherwise.
396 fdp = td->td_proc->p_fd;
397 spin_lock_shared(&fdp->fd_spin);
399 if (((u_int)fd) < fdp->fd_nfiles) {
400 fp = fdp->fd_files[fd].fp; /* can be NULL */
401 if (fp) {
402 fhold(fp);
403 if (fdp->fd_files[fd].isfull == 0)
404 goto enter;
406 } else {
407 fp = NULL;
409 spin_unlock_shared(&fdp->fd_spin);
411 return fp;
414 * We found a valid fp and held it; fdp is still shared-locked.
415 * Enter the fp into the per-thread cache. Find the oldest entry
416 * via lru, or an empty entry.
418 * Because fdp's spinlock is held (shared is fine), no other
419 * thread should be in the middle of clearing our selected entry.
421 enter:
422 best = &td->td_fdcache[0];
423 for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
424 if (fdc->fp == NULL) {
425 best = fdc;
426 break;
428 delta = fdc->lru - best->lru;
429 if (delta < 0)
430 best = fdc;
434 * Replace best
436 * Don't enter into the cache if we cannot get the lock.
438 status = atomic_swap_int(&best->locked, 1);
439 if (status == 1)
440 goto done;
443 * Clear the previous cache entry if present
445 if (best->fp) {
446 KKASSERT(best->fd >= 0);
447 fclearcache(&fdp->fd_files[best->fd], best, status);
451 * Create our new cache entry. This entry is 'safe' until we tie
452 * into the fdnode. If we cannot tie in, we will clear the entry.
454 best->fd = fd;
455 best->fp = fp;
456 best->lru = ++td->td_fdcache_lru;
457 best->locked = 2; /* borrowed ref */
459 fdn = &fdp->fd_files[fd];
460 for (i = 0; i < NTDCACHEFD; ++i) {
461 if (fdn->tdcache[i] == NULL &&
462 atomic_cmpset_ptr((void **)&fdn->tdcache[i], NULL, best)) {
463 goto done;
466 fdn->isfull = 1; /* no space */
467 best->fd = -1;
468 best->fp = NULL;
469 best->locked = 0;
470 done:
471 spin_unlock_shared(&fdp->fd_spin);
473 return fp;
477 * Drop the file pointer and return to the thread cache if possible.
479 * Caller must not hold fdp's spin lock.
480 * td must be the current thread.
482 void
483 dropfp(thread_t td, int fd, struct file *fp)
485 struct filedesc *fdp;
486 struct fdcache *fdc;
487 int status;
489 fdp = td->td_proc->p_fd;
492 * If our placeholder is still present we can re-cache the ref.
494 * Note that we can race an fclearcache().
496 for (fdc = &td->td_fdcache[0]; fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
497 if (fdc->fp != fp || fdc->fd != fd)
498 continue;
499 status = atomic_swap_int(&fdc->locked, 1);
500 switch(status) {
501 case 0:
503 * Not in mode 2, fdrop fp without caching.
505 atomic_swap_int(&fdc->locked, 0);
506 break;
507 case 1:
509 * Not in mode 2, locked by someone else.
510 * fdrop fp without caching.
512 break;
513 case 2:
515 * Intact borrowed ref, return to mode 0
516 * indicating that we have returned the ref.
518 * Return the borrowed ref (2->1->0)
520 if (fdc->fp == fp && fdc->fd == fd) {
521 atomic_swap_int(&fdc->locked, 0);
522 return;
524 atomic_swap_int(&fdc->locked, 2);
525 break;
530 * Failed to re-cache, drop the fp without caching.
532 fdrop(fp);
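/*
 * Summary of the fdc->locked states used by _holdfp_cache(), dropfp() and
 * fclearcache() above (inferred from the code, stated here for reference):
 *
 *	0 - unlocked; the cache entry holds its own ref on fp
 *	1 - transiently locked by some thread (examination or teardown)
 *	2 - unlocked; the entry's ref is on loan to the owning thread
 *
 * _holdfp_cache() borrows the ref (0 -> 2), dropfp() returns it (2 -> 0),
 * and fclearcache() must drop the ref only in the mode 0 case.
 */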
536 * Clear all descriptors cached in the per-thread fd cache for
537 * the specified thread.
539 * Caller must not hold p_fd->spin. This function will temporarily
540 * obtain a shared spin lock.
542 void
543 fexitcache(thread_t td)
545 struct filedesc *fdp;
546 struct fdcache *fdc;
547 int status;
548 int i;
550 if (td->td_proc == NULL)
551 return;
552 fdp = td->td_proc->p_fd;
553 if (fdp == NULL)
554 return;
557 * A shared lock is sufficient as the caller controls td and we
558 * are only clearing td's cache.
560 spin_lock_shared(&fdp->fd_spin);
561 for (i = 0; i < NFDCACHE; ++i) {
562 fdc = &td->td_fdcache[i];
563 if (fdc->fp) {
564 status = atomic_swap_int(&fdc->locked, 1);
565 if (status == 1) {
566 cpu_pause();
567 --i;
568 continue;
570 if (fdc->fp) {
571 KKASSERT(fdc->fd >= 0);
572 fclearcache(&fdp->fd_files[fdc->fd], fdc,
573 status);
575 atomic_swap_int(&fdc->locked, 0);
578 spin_unlock_shared(&fdp->fd_spin);
581 static __inline struct filelist_head *
582 fp2filelist(const struct file *fp)
584 u_int i;
586 i = (u_int)(uintptr_t)fp % NFILELIST_HEADS;
587 return &filelist_heads[i];
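/*
 * Because NFILELIST_HEADS (257) is prime, any allocation stride that is
 * not a multiple of 257 cycles through all buckets, spreading fps from
 * the same slab across the heads. The usual insertion sequence (this
 * exact form appears in falloc() below):
 *
 *	struct filelist_head *head = fp2filelist(fp);
 *
 *	spin_lock(&head->spin);
 *	LIST_INSERT_HEAD(&head->list, fp, f_list);
 *	spin_unlock(&head->spin);
 */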
590 static __inline
591 struct plimit *
592 readplimits(struct proc *p)
594 thread_t td = curthread;
595 struct plimit *limit;
597 limit = td->td_limit;
598 if (limit != p->p_limit) {
599 spin_lock_shared(&p->p_spin);
600 limit = p->p_limit;
601 atomic_add_int(&limit->p_refcnt, 1);
602 spin_unlock_shared(&p->p_spin);
603 if (td->td_limit)
604 plimit_free(td->td_limit);
605 td->td_limit = limit;
607 return limit;
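/*
 * The common case above (td_limit == p_limit) costs only a pointer
 * compare; on a mismatch we take p_spin once, ref the current plimit,
 * and re-prime td_limit so subsequent calls are again lockless.
 */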
611 * System calls on descriptors.
614 sys_getdtablesize(struct getdtablesize_args *uap)
616 struct proc *p = curproc;
617 struct plimit *limit = readplimits(p);
618 int dtsize;
620 if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
621 dtsize = INT_MAX;
622 else
623 dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
625 if (dtsize > maxfilesperproc)
626 dtsize = maxfilesperproc;
627 if (dtsize < minfilesperproc)
628 dtsize = minfilesperproc;
629 if (p->p_ucred->cr_uid && dtsize > maxfilesperuser)
630 dtsize = maxfilesperuser;
631 uap->sysmsg_result = dtsize;
632 return (0);
636 * Duplicate a file descriptor to a particular value.
638 * note: keep in mind that a potential race condition exists when closing
639 * descriptors from a shared descriptor table (via rfork).
642 sys_dup2(struct dup2_args *uap)
644 int error;
645 int fd = 0;
647 error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd);
648 uap->sysmsg_fds[0] = fd;
650 return (error);
654 * Duplicate a file descriptor.
657 sys_dup(struct dup_args *uap)
659 int error;
660 int fd = 0;
662 error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd);
663 uap->sysmsg_fds[0] = fd;
665 return (error);
669 * MPALMOSTSAFE - acquires mplock for fp operations
672 kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred)
674 struct thread *td = curthread;
675 struct proc *p = td->td_proc;
676 struct file *fp;
677 struct vnode *vp;
678 u_int newmin;
679 u_int oflags;
680 u_int nflags;
681 int closedcounter;
682 int tmp, error, flg = F_POSIX;
684 KKASSERT(p);
687 * Operations on file descriptors that do not require a file pointer.
689 switch (cmd) {
690 case F_GETFD:
691 error = fgetfdflags(p->p_fd, fd, &tmp);
692 if (error == 0)
693 dat->fc_cloexec = (tmp & UF_EXCLOSE) ? FD_CLOEXEC : 0;
694 return (error);
696 case F_SETFD:
697 if (dat->fc_cloexec & FD_CLOEXEC)
698 error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
699 else
700 error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
701 return (error);
702 case F_DUPFD:
703 newmin = dat->fc_fd;
704 error = kern_dup(DUP_VARIABLE | DUP_FCNTL, fd, newmin,
705 &dat->fc_fd);
706 return (error);
707 case F_DUPFD_CLOEXEC:
708 newmin = dat->fc_fd;
709 error = kern_dup(DUP_VARIABLE | DUP_CLOEXEC | DUP_FCNTL,
710 fd, newmin, &dat->fc_fd);
711 return (error);
712 case F_DUP2FD:
713 newmin = dat->fc_fd;
714 error = kern_dup(DUP_FIXED, fd, newmin, &dat->fc_fd);
715 return (error);
716 case F_DUP2FD_CLOEXEC:
717 newmin = dat->fc_fd;
718 error = kern_dup(DUP_FIXED | DUP_CLOEXEC, fd, newmin,
719 &dat->fc_fd);
720 return (error);
721 default:
722 break;
726 * Operations on file pointers
728 closedcounter = p->p_fd->fd_closedcounter;
729 if ((fp = holdfp(td, fd, -1)) == NULL)
730 return (EBADF);
732 switch (cmd) {
733 case F_GETFL:
734 dat->fc_flags = OFLAGS(fp->f_flag);
735 error = 0;
736 break;
738 case F_SETFL:
739 oflags = fp->f_flag;
740 nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
741 nflags |= oflags & ~FCNTLFLAGS;
743 error = 0;
744 if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY))
745 error = EINVAL;
746 if (error == 0 && ((nflags ^ oflags) & FASYNC)) {
747 tmp = nflags & FASYNC;
748 error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp,
749 cred, NULL);
753 * If no error, must be atomically set.
755 while (error == 0) {
756 oflags = fp->f_flag;
757 cpu_ccfence();
758 nflags = (oflags & ~FCNTLFLAGS) | (nflags & FCNTLFLAGS);
759 if (atomic_cmpset_int(&fp->f_flag, oflags, nflags))
760 break;
761 cpu_pause();
763 break;
765 case F_GETOWN:
766 error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner,
767 cred, NULL);
768 break;
770 case F_SETOWN:
771 error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner,
772 cred, NULL);
773 break;
775 case F_SETLKW:
776 flg |= F_WAIT;
777 /* Fall into F_SETLK */
779 case F_SETLK:
780 if (fp->f_type != DTYPE_VNODE) {
781 error = EBADF;
782 break;
784 vp = (struct vnode *)fp->f_data;
787 * copyin/lockop may block
789 if (dat->fc_flock.l_whence == SEEK_CUR)
790 dat->fc_flock.l_start += fp->f_offset;
792 switch (dat->fc_flock.l_type) {
793 case F_RDLCK:
794 if ((fp->f_flag & FREAD) == 0) {
795 error = EBADF;
796 break;
798 if (p->p_leader->p_advlock_flag == 0)
799 p->p_leader->p_advlock_flag = 1;
800 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
801 &dat->fc_flock, flg);
802 break;
803 case F_WRLCK:
804 if ((fp->f_flag & FWRITE) == 0) {
805 error = EBADF;
806 break;
808 if (p->p_leader->p_advlock_flag == 0)
809 p->p_leader->p_advlock_flag = 1;
810 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
811 &dat->fc_flock, flg);
812 break;
813 case F_UNLCK:
814 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
815 &dat->fc_flock, F_POSIX);
816 break;
817 default:
818 error = EINVAL;
819 break;
823 * It is possible to race a close() on the descriptor while
824 * we were blocked getting the lock. If this occurs the
825 * close might not have caught the lock.
827 if (checkfdclosed(td, p->p_fd, fd, fp, closedcounter)) {
828 dat->fc_flock.l_whence = SEEK_SET;
829 dat->fc_flock.l_start = 0;
830 dat->fc_flock.l_len = 0;
831 dat->fc_flock.l_type = F_UNLCK;
832 VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
833 F_UNLCK, &dat->fc_flock, F_POSIX);
835 break;
837 case F_GETLK:
838 if (fp->f_type != DTYPE_VNODE) {
839 error = EBADF;
840 break;
842 vp = (struct vnode *)fp->f_data;
844 * copyin/lockop may block
846 if (dat->fc_flock.l_type != F_RDLCK &&
847 dat->fc_flock.l_type != F_WRLCK &&
848 dat->fc_flock.l_type != F_UNLCK) {
849 error = EINVAL;
850 break;
852 if (dat->fc_flock.l_whence == SEEK_CUR)
853 dat->fc_flock.l_start += fp->f_offset;
854 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
855 &dat->fc_flock, F_POSIX);
856 break;
857 default:
858 error = EINVAL;
859 break;
862 fdrop(fp);
863 return (error);
867 * The file control system call.
870 sys_fcntl(struct fcntl_args *uap)
872 union fcntl_dat dat;
873 int error;
875 switch (uap->cmd) {
876 case F_DUPFD:
877 case F_DUP2FD:
878 case F_DUPFD_CLOEXEC:
879 case F_DUP2FD_CLOEXEC:
880 dat.fc_fd = uap->arg;
881 break;
882 case F_SETFD:
883 dat.fc_cloexec = uap->arg;
884 break;
885 case F_SETFL:
886 dat.fc_flags = uap->arg;
887 break;
888 case F_SETOWN:
889 dat.fc_owner = uap->arg;
890 break;
891 case F_SETLKW:
892 case F_SETLK:
893 case F_GETLK:
894 error = copyin((caddr_t)uap->arg, &dat.fc_flock,
895 sizeof(struct flock));
896 if (error)
897 return (error);
898 break;
901 error = kern_fcntl(uap->fd, uap->cmd, &dat, curthread->td_ucred);
903 if (error == 0) {
904 switch (uap->cmd) {
905 case F_DUPFD:
906 case F_DUP2FD:
907 case F_DUPFD_CLOEXEC:
908 case F_DUP2FD_CLOEXEC:
909 uap->sysmsg_result = dat.fc_fd;
910 break;
911 case F_GETFD:
912 uap->sysmsg_result = dat.fc_cloexec;
913 break;
914 case F_GETFL:
915 uap->sysmsg_result = dat.fc_flags;
916 break;
917 case F_GETOWN:
918 uap->sysmsg_result = dat.fc_owner;
919 break;
920 case F_GETLK:
921 error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
922 sizeof(struct flock));
923 break;
927 return (error);
931 * Common code for dup, dup2, and fcntl(F_DUPFD).
933 * There are four type flags: DUP_FCNTL, DUP_FIXED, DUP_VARIABLE, and
934 * DUP_CLOEXEC.
936 * DUP_FCNTL is for handling EINVAL vs. EBADF differences between
937 * fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC and dup2() (per POSIX).
938 * The next two flags are mutually exclusive, and the fourth is optional.
939 * DUP_FIXED tells kern_dup() to destructively dup over an existing file
940 * descriptor if "new" is already open. DUP_VARIABLE tells kern_dup()
941 * to find the lowest unused file descriptor that is greater than or
942 * equal to "new". DUP_CLOEXEC, which works with either of the first
943 * two flags, sets the close-on-exec flag on the "new" file descriptor.
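/*
 * How the entry points in this file map onto these flags:
 *
 *	sys_dup()                    DUP_VARIABLE
 *	sys_dup2()                   DUP_FIXED
 *	F_DUPFD                      DUP_VARIABLE | DUP_FCNTL
 *	F_DUPFD_CLOEXEC              DUP_VARIABLE | DUP_CLOEXEC | DUP_FCNTL
 *	F_DUP2FD                     DUP_FIXED
 *	F_DUP2FD_CLOEXEC             DUP_FIXED | DUP_CLOEXEC
 */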
946 kern_dup(int flags, int old, int new, int *res)
948 struct thread *td = curthread;
949 struct proc *p = td->td_proc;
950 struct plimit *limit = readplimits(p);
951 struct filedesc *fdp = p->p_fd;
952 struct file *fp;
953 struct file *delfp;
954 int oldflags;
955 int holdleaders;
956 int dtsize;
957 int error, newfd;
960 * Verify that we have a valid descriptor to dup from and
961 * possibly to dup to. When the new descriptor is out of
962 * bounds, fcntl()'s F_DUPFD and F_DUPFD_CLOEXEC must
963 * return EINVAL, while dup2() returns EBADF in
964 * this case.
966 * NOTE: maxfilesperuser is not applicable to dup()
968 retry:
969 if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
970 dtsize = INT_MAX;
971 else
972 dtsize = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
973 if (dtsize > maxfilesperproc)
974 dtsize = maxfilesperproc;
975 if (dtsize < minfilesperproc)
976 dtsize = minfilesperproc;
978 if (new < 0 || new > dtsize)
979 return (flags & DUP_FCNTL ? EINVAL : EBADF);
981 spin_lock(&fdp->fd_spin);
982 if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
983 spin_unlock(&fdp->fd_spin);
984 return (EBADF);
986 if ((flags & DUP_FIXED) && old == new) {
987 *res = new;
988 if (flags & DUP_CLOEXEC)
989 fdp->fd_files[new].fileflags |= UF_EXCLOSE;
990 spin_unlock(&fdp->fd_spin);
991 return (0);
993 fp = fdp->fd_files[old].fp;
994 oldflags = fdp->fd_files[old].fileflags;
995 fhold(fp);
998 * Allocate a new descriptor if DUP_VARIABLE, or expand the table
999 * if the requested descriptor is beyond the current table size.
1001 * This can block. Retry if the source descriptor no longer matches
1002 * or if our expectation in the expansion case races.
1004 * If we are not expanding or allocating a new descriptor, then reset
1005 * the target descriptor to a reserved state so we have a uniform
1006 * setup for the next code block.
1008 if ((flags & DUP_VARIABLE) || new >= fdp->fd_nfiles) {
1009 error = fdalloc_locked(p, fdp, new, &newfd);
1010 if (error) {
1011 spin_unlock(&fdp->fd_spin);
1012 fdrop(fp);
1013 return (error);
1016 * Check for ripout
1018 if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
1019 fsetfd_locked(fdp, NULL, newfd);
1020 spin_unlock(&fdp->fd_spin);
1021 fdrop(fp);
1022 goto retry;
1025 * Check for expansion race
1027 if ((flags & DUP_VARIABLE) == 0 && new != newfd) {
1028 fsetfd_locked(fdp, NULL, newfd);
1029 spin_unlock(&fdp->fd_spin);
1030 fdrop(fp);
1031 goto retry;
1034 * Check for ripout, newfd reused old (this case probably
1035 * can't occur).
1037 if (old == newfd) {
1038 fsetfd_locked(fdp, NULL, newfd);
1039 spin_unlock(&fdp->fd_spin);
1040 fdrop(fp);
1041 goto retry;
1043 new = newfd;
1044 delfp = NULL;
1045 } else {
1046 if (fdp->fd_files[new].reserved) {
1047 spin_unlock(&fdp->fd_spin);
1048 fdrop(fp);
1049 kprintf("Warning: dup(): target descriptor %d is "
1050 "reserved, waiting for it to be resolved\n",
1051 new);
1052 tsleep(fdp, 0, "fdres", hz);
1053 goto retry;
1057 * If the target descriptor was never allocated we have
1058 * to allocate it. If it was we have to clean out the
1059 * old descriptor. delfp inherits the ref from the
1060 * descriptor table.
1062 ++fdp->fd_closedcounter;
1063 fclearcache(&fdp->fd_files[new], NULL, 0);
1064 ++fdp->fd_closedcounter;
1065 delfp = fdp->fd_files[new].fp;
1066 fdp->fd_files[new].fp = NULL;
1067 fdp->fd_files[new].reserved = 1;
1068 if (delfp == NULL) {
1069 fdreserve_locked(fdp, new, 1);
1070 if (new > fdp->fd_lastfile)
1071 fdp->fd_lastfile = new;
1077 * NOTE: still holding an exclusive spinlock
1081 * If a descriptor is being overwritten we may have to tell
1082 * fdfree() to sleep to ensure that all relevant process
1083 * leaders can be traversed in closef().
1085 if (delfp != NULL && p->p_fdtol != NULL) {
1086 fdp->fd_holdleaderscount++;
1087 holdleaders = 1;
1088 } else {
1089 holdleaders = 0;
1091 KASSERT(delfp == NULL || (flags & DUP_FIXED),
1092 ("dup() picked an open file"));
1095 * Duplicate the source descriptor, update lastfile. If the new
1096 * descriptor was not allocated and we aren't replacing an existing
1097 * descriptor we have to mark the descriptor as being in use.
1099 * The fd_files[] array inherits fp's hold reference.
1101 fsetfd_locked(fdp, fp, new);
1102 if ((flags & DUP_CLOEXEC) != 0)
1103 fdp->fd_files[new].fileflags = oldflags | UF_EXCLOSE;
1104 else
1105 fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
1106 spin_unlock(&fdp->fd_spin);
1107 fdrop(fp);
1108 *res = new;
1111 * If we dup'd over a valid file, we now own the reference to it
1112 * and must dispose of it using closef() semantics (as if a
1113 * close() were performed on it).
1115 if (delfp) {
1116 if (SLIST_FIRST(&delfp->f_klist))
1117 knote_fdclose(delfp, fdp, new);
1118 closef(delfp, p);
1119 if (holdleaders) {
1120 spin_lock(&fdp->fd_spin);
1121 fdp->fd_holdleaderscount--;
1122 if (fdp->fd_holdleaderscount == 0 &&
1123 fdp->fd_holdleaderswakeup != 0) {
1124 fdp->fd_holdleaderswakeup = 0;
1125 spin_unlock(&fdp->fd_spin);
1126 wakeup(&fdp->fd_holdleaderscount);
1127 } else {
1128 spin_unlock(&fdp->fd_spin);
1132 return (0);
1136 * If sigio is on the list associated with a process or process group,
1137 * disable signalling from the device, remove sigio from the list and
1138 * free sigio.
1140 void
1141 funsetown(struct sigio **sigiop)
1143 struct pgrp *pgrp;
1144 struct proc *p;
1145 struct sigio *sigio;
1147 if ((sigio = *sigiop) != NULL) {
1148 lwkt_gettoken(&sigio_token); /* protect sigio */
1149 KKASSERT(sigiop == sigio->sio_myref);
1150 sigio = *sigiop;
1151 *sigiop = NULL;
1152 lwkt_reltoken(&sigio_token);
1154 if (sigio == NULL)
1155 return;
1157 if (sigio->sio_pgid < 0) {
1158 pgrp = sigio->sio_pgrp;
1159 sigio->sio_pgrp = NULL;
1160 lwkt_gettoken(&pgrp->pg_token);
1161 SLIST_REMOVE(&pgrp->pg_sigiolst, sigio, sigio, sio_pgsigio);
1162 lwkt_reltoken(&pgrp->pg_token);
1163 pgrel(pgrp);
1164 } else /* if ((*sigiop)->sio_pgid > 0) */ {
1165 p = sigio->sio_proc;
1166 sigio->sio_proc = NULL;
1167 PHOLD(p);
1168 lwkt_gettoken(&p->p_token);
1169 SLIST_REMOVE(&p->p_sigiolst, sigio, sigio, sio_pgsigio);
1170 lwkt_reltoken(&p->p_token);
1171 PRELE(p);
1173 crfree(sigio->sio_ucred);
1174 sigio->sio_ucred = NULL;
1175 kfree(sigio, M_SIGIO);
1179 * Free a list of sigio structures. Caller is responsible for ensuring
1180 * that the list is MPSAFE.
1182 void
1183 funsetownlst(struct sigiolst *sigiolst)
1185 struct sigio *sigio;
1187 while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
1188 funsetown(sigio->sio_myref);
1192 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
1194 * After permission checking, add a sigio structure to the sigio list for
1195 * the process or process group.
1198 fsetown(pid_t pgid, struct sigio **sigiop)
1200 struct proc *proc = NULL;
1201 struct pgrp *pgrp = NULL;
1202 struct sigio *sigio;
1203 int error;
1205 if (pgid == 0) {
1206 funsetown(sigiop);
1207 return (0);
1210 if (pgid > 0) {
1211 proc = pfind(pgid);
1212 if (proc == NULL) {
1213 error = ESRCH;
1214 goto done;
1218 * Policy - Don't allow a process to FSETOWN a process
1219 * in another session.
1221 * Remove this test to allow maximum flexibility or
1222 * restrict FSETOWN to the current process or process
1223 * group for maximum safety.
1225 if (proc->p_session != curproc->p_session) {
1226 error = EPERM;
1227 goto done;
1229 } else /* if (pgid < 0) */ {
1230 pgrp = pgfind(-pgid);
1231 if (pgrp == NULL) {
1232 error = ESRCH;
1233 goto done;
1237 * Policy - Don't allow a process to FSETOWN a process
1238 * in another session.
1240 * Remove this test to allow maximum flexibility or
1241 * restrict FSETOWN to the current process or process
1242 * group for maximum safety.
1244 if (pgrp->pg_session != curproc->p_session) {
1245 error = EPERM;
1246 goto done;
1249 sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK | M_ZERO);
1250 if (pgid > 0) {
1251 KKASSERT(pgrp == NULL);
1252 lwkt_gettoken(&proc->p_token);
1253 SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
1254 sigio->sio_proc = proc;
1255 lwkt_reltoken(&proc->p_token);
1256 } else {
1257 KKASSERT(proc == NULL);
1258 lwkt_gettoken(&pgrp->pg_token);
1259 SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
1260 sigio->sio_pgrp = pgrp;
1261 lwkt_reltoken(&pgrp->pg_token);
1262 pgrp = NULL;
1264 sigio->sio_pgid = pgid;
1265 sigio->sio_ucred = crhold(curthread->td_ucred);
1266 /* It would be convenient if p_ruid was in ucred. */
1267 sigio->sio_ruid = sigio->sio_ucred->cr_ruid;
1268 sigio->sio_myref = sigiop;
1270 lwkt_gettoken(&sigio_token);
1271 while (*sigiop)
1272 funsetown(sigiop);
1273 *sigiop = sigio;
1274 lwkt_reltoken(&sigio_token);
1275 error = 0;
1276 done:
1277 if (pgrp)
1278 pgrel(pgrp);
1279 if (proc)
1280 PRELE(proc);
1281 return (error);
1285 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
1287 pid_t
1288 fgetown(struct sigio **sigiop)
1290 struct sigio *sigio;
1291 pid_t own;
1293 lwkt_gettoken_shared(&sigio_token);
1294 sigio = *sigiop;
1295 own = (sigio != NULL ? sigio->sio_pgid : 0);
1296 lwkt_reltoken(&sigio_token);
1298 return (own);
1302 * Close many file descriptors.
1305 sys_closefrom(struct closefrom_args *uap)
1307 return(kern_closefrom(uap->fd));
1311 * Close all file descriptors greater than or equal to fd
1314 kern_closefrom(int fd)
1316 struct thread *td = curthread;
1317 struct proc *p = td->td_proc;
1318 struct filedesc *fdp;
1319 int error;
1320 int e2;
1322 KKASSERT(p);
1323 fdp = p->p_fd;
1325 if (fd < 0)
1326 return (EINVAL);
1329 * NOTE: This function will skip unassociated descriptors and
1330 * reserved descriptors that have not yet been assigned.
1331 * fd_lastfile can change as a side effect of kern_close().
1333 * NOTE: We accumulate EINTR errors and return EINTR if any
1334 * close() returned EINTR. However, the descriptor is
1335 * still closed and we do not break out of the loop.
1337 error = 0;
1338 spin_lock(&fdp->fd_spin);
1339 while (fd <= fdp->fd_lastfile) {
1340 if (fdp->fd_files[fd].fp != NULL) {
1341 spin_unlock(&fdp->fd_spin);
1342 /* ok if this races another close */
1343 e2 = kern_close(fd);
1344 if (e2 == EINTR)
1345 error = EINTR;
1346 spin_lock(&fdp->fd_spin);
1348 ++fd;
1350 spin_unlock(&fdp->fd_spin);
1352 return error;
1356 * Close a file descriptor.
1359 sys_close(struct close_args *uap)
1361 return(kern_close(uap->fd));
1365 * close() helper
1368 kern_close(int fd)
1370 struct thread *td = curthread;
1371 struct proc *p = td->td_proc;
1372 struct filedesc *fdp;
1373 struct file *fp;
1374 int error;
1375 int holdleaders;
1377 KKASSERT(p);
1378 fdp = p->p_fd;
1381 * funsetfd*() also clears the fd cache
1383 spin_lock(&fdp->fd_spin);
1384 if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
1385 spin_unlock(&fdp->fd_spin);
1386 return (EBADF);
1388 holdleaders = 0;
1389 if (p->p_fdtol != NULL) {
1391 * Ask fdfree() to sleep to ensure that all relevant
1392 * process leaders can be traversed in closef().
1394 fdp->fd_holdleaderscount++;
1395 holdleaders = 1;
1399 * we now hold the fp reference that used to be owned by the descriptor
1400 * array.
1402 spin_unlock(&fdp->fd_spin);
1403 if (SLIST_FIRST(&fp->f_klist))
1404 knote_fdclose(fp, fdp, fd);
1405 error = closef(fp, p);
1406 if (holdleaders) {
1407 spin_lock(&fdp->fd_spin);
1408 fdp->fd_holdleaderscount--;
1409 if (fdp->fd_holdleaderscount == 0 &&
1410 fdp->fd_holdleaderswakeup != 0) {
1411 fdp->fd_holdleaderswakeup = 0;
1412 spin_unlock(&fdp->fd_spin);
1413 wakeup(&fdp->fd_holdleaderscount);
1414 } else {
1415 spin_unlock(&fdp->fd_spin);
1418 return (error);
1422 * shutdown_args(int fd, int how)
1425 kern_shutdown(int fd, int how)
1427 struct thread *td = curthread;
1428 struct file *fp;
1429 int error;
1431 if ((fp = holdfp(td, fd, -1)) == NULL)
1432 return (EBADF);
1433 error = fo_shutdown(fp, how);
1434 fdrop(fp);
1436 return (error);
1440 * MPALMOSTSAFE
1443 sys_shutdown(struct shutdown_args *uap)
1445 int error;
1447 error = kern_shutdown(uap->s, uap->how);
1449 return (error);
1453 * fstat() helper
1456 kern_fstat(int fd, struct stat *ub)
1458 struct thread *td = curthread;
1459 struct file *fp;
1460 int error;
1462 if ((fp = holdfp(td, fd, -1)) == NULL)
1463 return (EBADF);
1464 error = fo_stat(fp, ub, td->td_ucred);
1465 fdrop(fp);
1467 return (error);
1471 * Return status information about a file descriptor.
1474 sys_fstat(struct fstat_args *uap)
1476 struct stat st;
1477 int error;
1479 error = kern_fstat(uap->fd, &st);
1481 if (error == 0)
1482 error = copyout(&st, uap->sb, sizeof(st));
1483 return (error);
1487 * Return pathconf information about a file descriptor.
1489 * MPALMOSTSAFE
1492 sys_fpathconf(struct fpathconf_args *uap)
1494 struct thread *td = curthread;
1495 struct file *fp;
1496 struct vnode *vp;
1497 int error = 0;
1499 if ((fp = holdfp(td, uap->fd, -1)) == NULL)
1500 return (EBADF);
1502 switch (fp->f_type) {
1503 case DTYPE_PIPE:
1504 case DTYPE_SOCKET:
1505 if (uap->name != _PC_PIPE_BUF) {
1506 error = EINVAL;
1507 } else {
1508 uap->sysmsg_result = PIPE_BUF;
1509 error = 0;
1511 break;
1512 case DTYPE_FIFO:
1513 case DTYPE_VNODE:
1514 vp = (struct vnode *)fp->f_data;
1515 error = VOP_PATHCONF(vp, uap->name, &uap->sysmsg_reg);
1516 break;
1517 default:
1518 error = EOPNOTSUPP;
1519 break;
1521 fdrop(fp);
1522 return(error);
1526 * Grow the file table so it can hold descriptors up through (want).
1528 * The fdp's spinlock must be held exclusively on entry and may be held
1529 * exclusively on return. The spinlock may be cycled by the routine.
1531 static void
1532 fdgrow_locked(struct filedesc *fdp, int want)
1534 struct fdnode *newfiles;
1535 struct fdnode *oldfiles;
1536 int nf, extra;
1538 nf = fdp->fd_nfiles;
1539 do {
1540 /* nf has to be of the form 2^n - 1 */
1541 nf = 2 * nf + 1;
1542 } while (nf <= want);
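/*
 * e.g. want = 40 starting from nf = 15: 15 -> 31 -> 63; the loop stops
 * at the first 2^n - 1 value strictly greater than (want).
 */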
1544 spin_unlock(&fdp->fd_spin);
1545 newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
1546 spin_lock(&fdp->fd_spin);
1549 * We could have raced another extend while we were not holding
1550 * the spinlock.
1552 if (fdp->fd_nfiles >= nf) {
1553 spin_unlock(&fdp->fd_spin);
1554 kfree(newfiles, M_FILEDESC);
1555 spin_lock(&fdp->fd_spin);
1556 return;
1559 * Copy the existing fd_files[] array and zero the new
1560 * portion of the array.
1562 extra = nf - fdp->fd_nfiles;
1563 bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
1564 bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));
1566 oldfiles = fdp->fd_files;
1567 fdp->fd_files = newfiles;
1568 fdp->fd_nfiles = nf;
1570 if (oldfiles != fdp->fd_builtin_files) {
1571 spin_unlock(&fdp->fd_spin);
1572 kfree(oldfiles, M_FILEDESC);
1573 spin_lock(&fdp->fd_spin);
1578 * Number of nodes in right subtree, including the root.
1580 static __inline int
1581 right_subtree_size(int n)
1583 return (n ^ (n | (n + 1)));
1587 * Bigger ancestor.
1589 static __inline int
1590 right_ancestor(int n)
1592 return (n | (n + 1));
1596 * Smaller ancestor.
1598 static __inline int
1599 left_ancestor(int n)
1601 return ((n & (n + 1)) - 1);
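/*
 * Worked examples for the three bit tricks above (all operate on the
 * 0-based in-place binary tree laid out in fd_files[]):
 *
 *	n	right_subtree_size(n)	right_ancestor(n)	left_ancestor(n)
 *		n ^ (n | (n + 1))	n | (n + 1)		(n & (n + 1)) - 1
 *	0	1			1			-1
 *	1	2			3			-1
 *	2	1			3			1
 *	3	4			7			-1
 *	4	1			5			3
 *	5	2			7			3
 *
 * left_ancestor() returning -1 terminates the bottom-up walks below.
 */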
1605 * Traverse the in-place binary tree bottom-up, adjusting the allocation
1606 * count so scans can determine where free descriptors are located.
1608 * caller must be holding an exclusive spinlock on fdp
1610 static
1611 void
1612 fdreserve_locked(struct filedesc *fdp, int fd, int incr)
1614 while (fd >= 0) {
1615 fdp->fd_files[fd].allocated += incr;
1616 KKASSERT(fdp->fd_files[fd].allocated >= 0);
1617 fd = left_ancestor(fd);
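/*
 * Example: fdreserve_locked(fdp, 5, 1) bumps .allocated at node 5, then
 * at left_ancestor(5) == 3, then stops because left_ancestor(3) == -1.
 * A later scan seeing .allocated == 0 at a node knows that node's entire
 * subtree is free (see the NOTE in fdalloc_locked() below).
 */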
1622 * Reserve a file descriptor for the process. If no error occurs, the
1623 * caller MUST at some point call fsetfd() or assign a file pointer
1624 * or dispose of the reservation.
1626 static
1628 fdalloc_locked(struct proc *p, struct filedesc *fdp, int want, int *result)
1630 struct plimit *limit = readplimits(p);
1631 struct uidinfo *uip;
1632 int fd, rsize, rsum, node, lim;
1635 * Check dtable size limit
1637 *result = -1; /* avoid gcc warnings */
1638 if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
1639 lim = INT_MAX;
1640 else
1641 lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
1643 if (lim > maxfilesperproc)
1644 lim = maxfilesperproc;
1645 if (lim < minfilesperproc)
1646 lim = minfilesperproc;
1647 if (want >= lim)
1648 return (EMFILE);
1651 * Check that the user has not run out of descriptors (non-root only).
1652 * As a safety measure the dtable is allowed to have at least
1653 * minfilesperproc open fds regardless of the maxfilesperuser limit.
1655 * This isn't as loose a spec as ui_posixlocks, so we use atomic
1656 * ops to force synchronize and recheck if we would otherwise
1657 * error.
1659 if (p->p_ucred->cr_uid && fdp->fd_nfiles >= minfilesperproc) {
1660 uip = p->p_ucred->cr_uidinfo;
1661 if (uip->ui_openfiles > maxfilesperuser) {
1662 int n;
1663 int count;
1665 count = 0;
1666 for (n = 0; n < ncpus; ++n) {
1667 count += atomic_swap_int(
1668 &uip->ui_pcpu[n].pu_openfiles, 0);
1670 atomic_add_int(&uip->ui_openfiles, count);
1671 if (uip->ui_openfiles > maxfilesperuser) {
1672 krateprintf(&krate_uidinfo,
1673 "Warning: user %d pid %d (%s) "
1674 "ran out of file descriptors "
1675 "(%d/%d)\n",
1676 p->p_ucred->cr_uid, (int)p->p_pid,
1677 p->p_comm,
1678 uip->ui_openfiles, maxfilesperuser);
1679 return(ENFILE);
1685 * Grow the dtable if necessary
1687 if (want >= fdp->fd_nfiles)
1688 fdgrow_locked(fdp, want);
1691 * Search for a free descriptor starting at the higher
1692 * of want or fd_freefile. If that fails, consider
1693 * expanding the fd_files[] array.
1695 * NOTE! the 'allocated' field is a cumulative recursive allocation
1696 * count. If we happen to see a value of 0 then we can shortcut
1697 * our search. Otherwise we run through the tree going
1698 * down branches we know have free descriptor(s) until we hit a
1699 * leaf node. The leaf node will be free but will not necessarily
1700 * have an allocated field of 0.
1702 retry:
1703 /* move up the tree looking for a subtree with a free node */
1704 for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
1705 fd = right_ancestor(fd)) {
1706 if (fdp->fd_files[fd].allocated == 0)
1707 goto found;
1709 rsize = right_subtree_size(fd);
1710 if (fdp->fd_files[fd].allocated == rsize)
1711 continue; /* right subtree full */
1714 * Free fd is in the right subtree of the tree rooted at fd.
1715 * Call that subtree R. Look for the smallest (leftmost)
1716 * subtree of R with an unallocated fd: continue moving
1717 * down the left branch until encountering a full left
1718 * subtree, then move to the right.
1720 for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
1721 node = fd + rsize;
1722 rsum += fdp->fd_files[node].allocated;
1723 if (fdp->fd_files[fd].allocated == rsum + rsize) {
1724 fd = node; /* move to the right */
1725 if (fdp->fd_files[node].allocated == 0)
1726 goto found;
1727 rsum = 0;
1730 goto found;
1734 * No space in current array. Expand?
1736 if (fdp->fd_nfiles >= lim) {
1737 return (EMFILE);
1739 fdgrow_locked(fdp, want);
1740 goto retry;
1742 found:
1743 KKASSERT(fd < fdp->fd_nfiles);
1744 if (fd > fdp->fd_lastfile)
1745 fdp->fd_lastfile = fd;
1746 if (want <= fdp->fd_freefile)
1747 fdp->fd_freefile = fd;
1748 *result = fd;
1749 KKASSERT(fdp->fd_files[fd].fp == NULL);
1750 KKASSERT(fdp->fd_files[fd].reserved == 0);
1751 fdp->fd_files[fd].fileflags = 0;
1752 fdp->fd_files[fd].reserved = 1;
1753 fdreserve_locked(fdp, fd, 1);
1755 return (0);
1759 fdalloc(struct proc *p, int want, int *result)
1761 struct filedesc *fdp = p->p_fd;
1762 int error;
1764 spin_lock(&fdp->fd_spin);
1765 error = fdalloc_locked(p, fdp, want, result);
1766 spin_unlock(&fdp->fd_spin);
1768 return error;
1772 * Check to see whether n user file descriptors
1773 * are available to the process p.
1776 fdavail(struct proc *p, int n)
1778 struct plimit *limit = readplimits(p);
1779 struct filedesc *fdp = p->p_fd;
1780 struct fdnode *fdnode;
1781 int i, lim, last;
1783 if (limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur > INT_MAX)
1784 lim = INT_MAX;
1785 else
1786 lim = (int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur;
1788 if (lim > maxfilesperproc)
1789 lim = maxfilesperproc;
1790 if (lim < minfilesperproc)
1791 lim = minfilesperproc;
1793 spin_lock(&fdp->fd_spin);
1794 if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
1795 spin_unlock(&fdp->fd_spin);
1796 return (1);
1798 last = min(fdp->fd_nfiles, lim);
1799 fdnode = &fdp->fd_files[fdp->fd_freefile];
1800 for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) {
1801 if (fdnode->fp == NULL && --n <= 0) {
1802 spin_unlock(&fdp->fd_spin);
1803 return (1);
1806 spin_unlock(&fdp->fd_spin);
1807 return (0);
1811 * Revoke open descriptors referencing (f_data, f_type)
1813 * Any revoke executed within a prison is only able to
1814 * revoke descriptors for processes within that prison.
1816 * Returns 0 on success or an error code.
1818 struct fdrevoke_info {
1819 void *data;
1820 short type;
1821 short unused;
1822 int found;
1823 struct ucred *cred;
1824 struct file *nfp;
1827 static int fdrevoke_check_callback(struct file *fp, void *vinfo);
1828 static int fdrevoke_proc_callback(struct proc *p, void *vinfo);
1831 fdrevoke(void *f_data, short f_type, struct ucred *cred)
1833 struct fdrevoke_info info;
1834 int error;
1836 bzero(&info, sizeof(info));
1837 info.data = f_data;
1838 info.type = f_type;
1839 info.cred = cred;
1840 error = falloc(NULL, &info.nfp, NULL);
1841 if (error)
1842 return (error);
1845 * Scan the file pointer table once. dups do not dup file pointers,
1846 * only descriptors, so there is no leak. Set FREVOKED on the fps
1847 * being revoked.
1849 * Any fps sent over unix-domain sockets will be revoked by the
1850 * socket code checking for FREVOKED when the fps are externalized.
1851 * revoke_token is used to make sure that fps marked FREVOKED and
1852 * externalized will be picked up by the following allproc_scan().
1854 lwkt_gettoken(&revoke_token);
1855 allfiles_scan_exclusive(fdrevoke_check_callback, &info);
1856 lwkt_reltoken(&revoke_token);
1859 * If any fps were marked track down the related descriptors
1860 * and close them. Any dup()s at this point will notice
1861 * the FREVOKED already set in the fp and do the right thing.
1863 if (info.found)
1864 allproc_scan(fdrevoke_proc_callback, &info, 0);
1865 fdrop(info.nfp);
1866 return(0);
1870 * Locate matching file pointers directly.
1872 * WARNING: allfiles_scan_exclusive() holds a spinlock through these calls!
1874 static int
1875 fdrevoke_check_callback(struct file *fp, void *vinfo)
1877 struct fdrevoke_info *info = vinfo;
1880 * File pointers already flagged for revocation are skipped.
1882 if (fp->f_flag & FREVOKED)
1883 return(0);
1886 * If revoking from a prison, file pointers created outside of
1887 * that prison, or file pointers without creds, cannot be revoked.
1889 if (info->cred->cr_prison &&
1890 (fp->f_cred == NULL ||
1891 info->cred->cr_prison != fp->f_cred->cr_prison)) {
1892 return(0);
1896 * If the file pointer matches then mark it for revocation. The
1897 * flag is currently only used by unp_revoke_gc().
1899 * info->found is a heuristic and can race in a SMP environment.
1901 if (info->data == fp->f_data && info->type == fp->f_type) {
1902 atomic_set_int(&fp->f_flag, FREVOKED);
1903 info->found = 1;
1905 return(0);
1909 * Locate matching file pointers via process descriptor tables.
1911 static int
1912 fdrevoke_proc_callback(struct proc *p, void *vinfo)
1914 struct fdrevoke_info *info = vinfo;
1915 struct filedesc *fdp;
1916 struct file *fp;
1917 int n;
1919 if (p->p_stat == SIDL || p->p_stat == SZOMB)
1920 return(0);
1921 if (info->cred->cr_prison &&
1922 info->cred->cr_prison != p->p_ucred->cr_prison) {
1923 return(0);
1927 * If the controlling terminal of the process matches the
1928 * vnode being revoked we clear the controlling terminal.
1930 * The normal spec_close() may not catch this because it
1931 * uses curproc instead of p.
1933 if (p->p_session && info->type == DTYPE_VNODE &&
1934 info->data == p->p_session->s_ttyvp) {
1935 p->p_session->s_ttyvp = NULL;
1936 vrele(info->data);
1940 * Softref the fdp to prevent it from being destroyed
1942 spin_lock(&p->p_spin);
1943 if ((fdp = p->p_fd) == NULL) {
1944 spin_unlock(&p->p_spin);
1945 return(0);
1947 atomic_add_int(&fdp->fd_softrefs, 1);
1948 spin_unlock(&p->p_spin);
1951 * Locate and close any matching file descriptors, replacing
1952 * them with info->nfp.
1954 spin_lock(&fdp->fd_spin);
1955 for (n = 0; n < fdp->fd_nfiles; ++n) {
1956 if ((fp = fdp->fd_files[n].fp) == NULL)
1957 continue;
1958 if (fp->f_flag & FREVOKED) {
1959 ++fdp->fd_closedcounter;
1960 fclearcache(&fdp->fd_files[n], NULL, 0);
1961 ++fdp->fd_closedcounter;
1962 fhold(info->nfp);
1963 fdp->fd_files[n].fp = info->nfp;
1964 spin_unlock(&fdp->fd_spin);
1965 knote_fdclose(fp, fdp, n); /* XXX */
1966 closef(fp, p);
1967 spin_lock(&fdp->fd_spin);
1970 spin_unlock(&fdp->fd_spin);
1971 atomic_subtract_int(&fdp->fd_softrefs, 1);
1972 return(0);
1976 * falloc:
1977 * Create a new open file structure and reserve a file descriptor
1978 * for the process that refers to it.
1980 * Root creds are checked using lp, or assumed if lp is NULL. If
1981 * resultfd is non-NULL then lp must also be non-NULL. No file
1982 * descriptor is reserved (and no process context is needed) if
1983 * resultfd is NULL.
1985 * A file pointer with a refcount of 1 is returned. Note that the
1986 * file pointer is NOT associated with the descriptor. If falloc
1987 * returns success, fsetfd() MUST be called to either associate the
1988 * file pointer or clear the reservation.
1991 falloc(struct lwp *lp, struct file **resultfp, int *resultfd)
1993 static struct timeval lastfail;
1994 static int curfail;
1995 struct filelist_head *head;
1996 struct file *fp;
1997 struct ucred *cred = lp ? lp->lwp_thread->td_ucred : proc0.p_ucred;
1998 int error;
2000 fp = NULL;
2003 * Handle filetable full issues and root overfill.
2005 if (nfiles >= maxfiles - maxfilesrootres &&
2006 (cred->cr_ruid != 0 || nfiles >= maxfiles)) {
2007 if (ppsratecheck(&lastfail, &curfail, 1)) {
2008 kprintf("kern.maxfiles limit exceeded by uid %d, "
2009 "please see tuning(7).\n",
2010 cred->cr_ruid);
2012 error = ENFILE;
2013 goto done;
2017 * Allocate a new open file structure.
2019 fp = objcache_get(file_objcache, M_WAITOK);
2020 bzero(fp, sizeof(*fp));
2021 spin_init(&fp->f_spin, "falloc");
2022 SLIST_INIT(&fp->f_klist);
2023 fp->f_count = 1;
2024 fp->f_ops = &badfileops;
2025 fp->f_seqcount = 1;
2026 fsetcred(fp, cred);
2027 atomic_add_int(&nfiles, 1);
2029 head = fp2filelist(fp);
2030 spin_lock(&head->spin);
2031 LIST_INSERT_HEAD(&head->list, fp, f_list);
2032 spin_unlock(&head->spin);
2034 if (resultfd) {
2035 if ((error = fdalloc(lp->lwp_proc, 0, resultfd)) != 0) {
2036 fdrop(fp);
2037 fp = NULL;
2039 } else {
2040 error = 0;
2042 done:
2043 *resultfp = fp;
2044 return (error);
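/*
 * Sketch of the canonical caller sequence (the f_data/f_ops values are
 * hypothetical; real callers live in kern_open() and friends). After
 * fsetfd() the descriptor table holds its own ref, so the caller drops
 * the ref falloc returned:
 *
 *	error = falloc(lp, &fp, &fd);	(p == lp->lwp_proc)
 *	if (error)
 *		return (error);
 *	fp->f_data = myobj;			(hypothetical)
 *	fp->f_ops = &myobj_fileops;		(hypothetical)
 *	fsetfd(p->p_fd, fp, fd);
 *	fdrop(fp);
 *
 * On an intermediate error the reservation must still be resolved:
 *
 *	fsetfd(p->p_fd, NULL, fd);
 *	fdrop(fp);
 */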
2048 * Check for races against a file descriptor by determining that the
2049 * file pointer is still associated with the specified file descriptor,
2050 * and a close is not currently in progress.
2053 checkfdclosed(thread_t td, struct filedesc *fdp, int fd, struct file *fp,
2054 int closedcounter)
2056 struct fdcache *fdc;
2057 int error;
2059 cpu_lfence();
2060 if (fdp->fd_closedcounter == closedcounter)
2061 return 0;
2063 if (td->td_proc && td->td_proc->p_fd == fdp) {
2064 for (fdc = &td->td_fdcache[0];
2065 fdc < &td->td_fdcache[NFDCACHE]; ++fdc) {
2066 if (fdc->fd == fd && fdc->fp == fp)
2067 return 0;
2071 spin_lock_shared(&fdp->fd_spin);
2072 if ((unsigned)fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp)
2073 error = EBADF;
2074 else
2075 error = 0;
2076 spin_unlock_shared(&fdp->fd_spin);
2077 return (error);
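/*
 * Usage mirrors the F_SETLK path in kern_fcntl() above: snapshot
 * fd_closedcounter before a potentially blocking operation, then undo
 * any side effects if the descriptor was closed out from under us:
 *
 *	closedcounter = p->p_fd->fd_closedcounter;
 *	fp = holdfp(td, fd, -1);
 *	... potentially blocking operation ...
 *	if (checkfdclosed(td, p->p_fd, fd, fp, closedcounter))
 *		... back the operation out (e.g. issue F_UNLCK) ...
 */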
2081 * Associate a file pointer with a previously reserved file descriptor.
2082 * This function always succeeds.
2084 * If fp is NULL, the file descriptor is returned to the pool.
2086 * Caller must hold an exclusive spinlock on fdp->fd_spin.
2088 static void
2089 fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd)
2091 KKASSERT((unsigned)fd < fdp->fd_nfiles);
2092 KKASSERT(fdp->fd_files[fd].reserved != 0);
2093 if (fp) {
2094 fhold(fp);
2095 /* fclearcache(&fdp->fd_files[fd], NULL, 0); */
2096 fdp->fd_files[fd].fp = fp;
2097 fdp->fd_files[fd].reserved = 0;
2098 } else {
2099 fdp->fd_files[fd].reserved = 0;
2100 fdreserve_locked(fdp, fd, -1);
2101 fdfixup_locked(fdp, fd);
2106 * Caller must hold an exclusive spinlock on fdp->fd_spin.
2108 void
2109 fsetfd(struct filedesc *fdp, struct file *fp, int fd)
2111 spin_lock(&fdp->fd_spin);
2112 fsetfd_locked(fdp, fp, fd);
2113 spin_unlock(&fdp->fd_spin);
2117 * Caller must hold an exclusive spinlock on fdp->fd_spin.
2119 static
2120 struct file *
2121 funsetfd_locked(struct filedesc *fdp, int fd)
2123 struct file *fp;
2125 if ((unsigned)fd >= fdp->fd_nfiles)
2126 return (NULL);
2127 if ((fp = fdp->fd_files[fd].fp) == NULL)
2128 return (NULL);
2129 ++fdp->fd_closedcounter;
2130 fclearcache(&fdp->fd_files[fd], NULL, 0);
2131 fdp->fd_files[fd].fp = NULL;
2132 fdp->fd_files[fd].fileflags = 0;
2133 ++fdp->fd_closedcounter;
2135 fdreserve_locked(fdp, fd, -1);
2136 fdfixup_locked(fdp, fd);
2138 return(fp);
2142 * WARNING: May not be called before initial fsetfd().
2145 fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
2147 int error;
2149 spin_lock_shared(&fdp->fd_spin);
2150 if (((u_int)fd) >= fdp->fd_nfiles) {
2151 error = EBADF;
2152 } else if (fdp->fd_files[fd].fp == NULL) {
2153 error = EBADF;
2154 } else {
2155 *flagsp = fdp->fd_files[fd].fileflags;
2156 error = 0;
2158 spin_unlock_shared(&fdp->fd_spin);
2160 return (error);
2164 * WARNING: May not be called before initial fsetfd().
2167 fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
2169 int error;
2171 spin_lock(&fdp->fd_spin);
2172 if (((u_int)fd) >= fdp->fd_nfiles) {
2173 error = EBADF;
2174 } else if (fdp->fd_files[fd].fp == NULL) {
2175 error = EBADF;
2176 } else {
2177 fdp->fd_files[fd].fileflags |= add_flags;
2178 error = 0;
2180 spin_unlock(&fdp->fd_spin);
2182 return (error);
2186 * WARNING: May not be called before initial fsetfd().
2189 fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
2191 int error;
2193 spin_lock(&fdp->fd_spin);
2194 if (((u_int)fd) >= fdp->fd_nfiles) {
2195 error = EBADF;
2196 } else if (fdp->fd_files[fd].fp == NULL) {
2197 error = EBADF;
2198 } else {
2199 fdp->fd_files[fd].fileflags &= ~rem_flags;
2200 error = 0;
2202 spin_unlock(&fdp->fd_spin);
2204 return (error);
2208 * Set/Change/Clear the creds for a fp and synchronize the uidinfo.
2210 void
2211 fsetcred(struct file *fp, struct ucred *ncr)
2213 struct ucred *ocr;
2214 struct uidinfo *uip;
2215 struct uidcount *pup;
2216 int cpu = mycpuid;
2217 int count;
2219 ocr = fp->f_cred;
2220 if (ocr == NULL || ncr == NULL || ocr->cr_uidinfo != ncr->cr_uidinfo) {
2221 if (ocr) {
2222 uip = ocr->cr_uidinfo;
2223 pup = &uip->ui_pcpu[cpu];
2224 atomic_add_int(&pup->pu_openfiles, -1);
2225 if (pup->pu_openfiles < -PUP_LIMIT ||
2226 pup->pu_openfiles > PUP_LIMIT) {
2227 count = atomic_swap_int(&pup->pu_openfiles, 0);
2228 atomic_add_int(&uip->ui_openfiles, count);
2231 if (ncr) {
2232 uip = ncr->cr_uidinfo;
2233 pup = &uip->ui_pcpu[cpu];
2234 atomic_add_int(&pup->pu_openfiles, 1);
2235 if (pup->pu_openfiles < -PUP_LIMIT ||
2236 pup->pu_openfiles > PUP_LIMIT) {
2237 count = atomic_swap_int(&pup->pu_openfiles, 0);
2238 atomic_add_int(&uip->ui_openfiles, count);
2242 if (ncr)
2243 crhold(ncr);
2244 fp->f_cred = ncr;
2245 if (ocr)
2246 crfree(ocr);

/*
 * Free a file descriptor.
 */
static
void
ffree(struct file *fp)
{
	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
	fsetcred(fp, NULL);
	if (fp->f_nchandle.ncp)
		cache_drop(&fp->f_nchandle);
	objcache_put(file_objcache, fp);
}

/*
 * called from init_main, initialize filedesc0 for proc0.
 */
void
fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask)
{
	p0->p_fd = fdp0;
	p0->p_fdtol = NULL;
	fdp0->fd_refcnt = 1;
	fdp0->fd_cmask = cmask;
	fdp0->fd_files = fdp0->fd_builtin_files;
	fdp0->fd_nfiles = NDFILE;
	fdp0->fd_lastfile = -1;
	spin_init(&fdp0->fd_spin, "fdinitbootstrap");
}

/*
 * Build a new filedesc structure.
 */
struct filedesc *
fdinit(struct proc *p)
{
	struct filedesc *newfdp;
	struct filedesc *fdp = p->p_fd;

	newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO);
	spin_lock(&fdp->fd_spin);
	if (fdp->fd_cdir) {
		newfdp->fd_cdir = fdp->fd_cdir;
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
	}

	/*
	 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of
	 * proc0, but should unconditionally exist in other processes.
	 */
	if (fdp->fd_rdir) {
		newfdp->fd_rdir = fdp->fd_rdir;
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	}
	if (fdp->fd_jdir) {
		newfdp->fd_jdir = fdp->fd_jdir;
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	}
	spin_unlock(&fdp->fd_spin);

	/* Create the file descriptor table. */
	newfdp->fd_refcnt = 1;
	newfdp->fd_cmask = cmask;
	newfdp->fd_files = newfdp->fd_builtin_files;
	newfdp->fd_nfiles = NDFILE;
	newfdp->fd_lastfile = -1;
	spin_init(&newfdp->fd_spin, "fdinit");

	return (newfdp);
}

/*
 * Share a filedesc structure.
 */
struct filedesc *
fdshare(struct proc *p)
{
	struct filedesc *fdp;

	fdp = p->p_fd;
	spin_lock(&fdp->fd_spin);
	fdp->fd_refcnt++;
	spin_unlock(&fdp->fd_spin);
	return (fdp);
}

/*
 * Copy a filedesc structure.
 */
int
fdcopy(struct proc *p, struct filedesc **fpp)
{
	struct filedesc *fdp = p->p_fd;
	struct filedesc *newfdp;
	struct fdnode *fdnode;
	int i;
	int ni;

	/*
	 * Certain daemons might not have file descriptors.
	 */
	if (fdp == NULL)
		return (0);

	/*
	 * Allocate the new filedesc and fd_files[] array.  This can race
	 * with operations by other threads on the fdp so we have to be
	 * careful.
	 */
	newfdp = kmalloc(sizeof(struct filedesc),
			 M_FILEDESC, M_WAITOK | M_ZERO | M_NULLOK);
	if (newfdp == NULL) {
		*fpp = NULL;
		return (-1);
	}
again:
	spin_lock(&fdp->fd_spin);
	if (fdp->fd_lastfile < NDFILE) {
		newfdp->fd_files = newfdp->fd_builtin_files;
		i = NDFILE;
	} else {
		/*
		 * We have to allocate (2^N-1) entries for our in-place
		 * binary tree.  Allow the table to shrink.
		 */
		i = fdp->fd_nfiles;
		ni = (i - 1) / 2;
		while (ni > fdp->fd_lastfile && ni > NDFILE) {
			i = ni;
			ni = (i - 1) / 2;
		}
		spin_unlock(&fdp->fd_spin);
		newfdp->fd_files = kmalloc(i * sizeof(struct fdnode),
					   M_FILEDESC, M_WAITOK | M_ZERO);

		/*
		 * Check for race, retry
		 */
		spin_lock(&fdp->fd_spin);
		if (i <= fdp->fd_lastfile) {
			spin_unlock(&fdp->fd_spin);
			kfree(newfdp->fd_files, M_FILEDESC);
			goto again;
		}
	}

	/*
	 * Dup the remaining fields.  vref() and cache_hold() can be
	 * safely called while holding the read spinlock on fdp.
	 *
	 * The read spinlock on fdp is still being held.
	 *
	 * NOTE: vref and cache_hold calls for the case where the vnode
	 * or cache entry already has at least one ref may be called
	 * while holding spin locks.
	 */
	if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) {
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
	}
	/*
	 * We must check for fd_rdir here, at least for now because
	 * the init process is created before we have access to the
	 * rootvnode to take a reference to it.
	 */
	if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) {
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	}
	if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) {
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	}
	newfdp->fd_refcnt = 1;
	newfdp->fd_nfiles = i;
	newfdp->fd_lastfile = fdp->fd_lastfile;
	newfdp->fd_freefile = fdp->fd_freefile;
	newfdp->fd_cmask = fdp->fd_cmask;
	spin_init(&newfdp->fd_spin, "fdcopy");

	/*
	 * Copy the descriptor table through (i).  This also copies the
	 * allocation state.  Then go through and ref the file pointers
	 * and clean up any KQ descriptors.
	 *
	 * kq descriptors cannot be copied.  Since we haven't ref'd the
	 * copied files yet we can ignore the return value from funsetfd().
	 *
	 * The read spinlock on fdp is still being held.
	 *
	 * Be sure to clean out fdnode->tdcache, otherwise bad things will
	 * happen.
	 */
	bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode));
	for (i = 0; i < newfdp->fd_nfiles; ++i) {
		fdnode = &newfdp->fd_files[i];
		if (fdnode->reserved) {
			fdreserve_locked(newfdp, i, -1);
			fdnode->reserved = 0;
			fdfixup_locked(newfdp, i);
		} else if (fdnode->fp) {
			bzero(&fdnode->tdcache, sizeof(fdnode->tdcache));
			if (fdnode->fp->f_type == DTYPE_KQUEUE) {
				(void)funsetfd_locked(newfdp, i);
			} else {
				fhold(fdnode->fp);
			}
		}
	}
	spin_unlock(&fdp->fd_spin);
	*fpp = newfdp;
	return (0);
}
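
/*
 * Illustrative sketch (hypothetical, for exposition): the intended
 * pairing of fdshare() and fdcopy() in fork-style code.  A thread-style
 * fork shares the table by reference while a normal fork takes a deep
 * copy; the exact flag handling lives in kern_fork.c and may differ from
 * this outline:
 *
 *	struct filedesc *fdp;
 *
 *	if (share_fd_table) {
 *		fdp = fdshare(p1);
 *	} else {
 *		if (fdcopy(p1, &fdp) < 0)
 *			return (ENOMEM);
 *	}
 *	p2->p_fd = fdp;
 */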

/*
 * Release a filedesc structure.
 *
 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE)
 */
void
fdfree(struct proc *p, struct filedesc *repl)
{
	struct filedesc *fdp;
	struct fdnode *fdnode;
	int i;
	struct filedesc_to_leader *fdtol;
	struct file *fp;
	struct vnode *vp;
	struct flock lf;

	/*
	 * Before destroying or replacing p->p_fd we must be sure to
	 * clean out the cache of the last thread, which should be
	 * curthread.
	 */
	fexitcache(curthread);

	/*
	 * Certain daemons might not have file descriptors.
	 */
	fdp = p->p_fd;
	if (fdp == NULL) {
		p->p_fd = repl;
		return;
	}

	/*
	 * Severe messing around to follow.
	 */
	spin_lock(&fdp->fd_spin);

	/* Check for special need to clear POSIX style locks */
	fdtol = p->p_fdtol;
	if (fdtol != NULL) {
		KASSERT(fdtol->fdl_refcount > 0,
			("filedesc_to_refcount botch: fdl_refcount=%d",
			 fdtol->fdl_refcount));
		if (fdtol->fdl_refcount == 1 && p->p_leader->p_advlock_flag) {
			for (i = 0; i <= fdp->fd_lastfile; ++i) {
				fdnode = &fdp->fd_files[i];
				if (fdnode->fp == NULL ||
				    fdnode->fp->f_type != DTYPE_VNODE) {
					continue;
				}
				fp = fdnode->fp;
				fhold(fp);
				spin_unlock(&fdp->fd_spin);

				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = (struct vnode *)fp->f_data;
				VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
					    F_UNLCK, &lf, F_POSIX);
				fdrop(fp);
				spin_lock(&fdp->fd_spin);
			}
		}
retry:
		if (fdtol->fdl_refcount == 1) {
			if (fdp->fd_holdleaderscount > 0 &&
			    p->p_leader->p_advlock_flag) {
				/*
				 * close() or do_dup() has cleared a reference
				 * in a shared file descriptor table.
				 */
				fdp->fd_holdleaderswakeup = 1;
				ssleep(&fdp->fd_holdleaderscount,
				       &fdp->fd_spin, 0, "fdlhold", 0);
				goto retry;
			}
			if (fdtol->fdl_holdcount > 0) {
				/*
				 * Ensure that fdtol->fdl_leader
				 * remains valid in closef().
				 */
				fdtol->fdl_wakeup = 1;
				ssleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0);
				goto retry;
			}
		}
		fdtol->fdl_refcount--;
		if (fdtol->fdl_refcount == 0 &&
		    fdtol->fdl_holdcount == 0) {
			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
		} else {
			fdtol = NULL;
		}
		p->p_fdtol = NULL;
		if (fdtol != NULL) {
			spin_unlock(&fdp->fd_spin);
			kfree(fdtol, M_FILEDESC_TO_LEADER);
			spin_lock(&fdp->fd_spin);
		}
	}
	if (--fdp->fd_refcnt > 0) {
		spin_unlock(&fdp->fd_spin);
		spin_lock(&p->p_spin);
		p->p_fd = repl;
		spin_unlock(&p->p_spin);
		return;
	}

	/*
	 * Even though we are the last reference to the structure, allproc
	 * scans may still reference it.  Maintain proper locks until we
	 * can replace p->p_fd.
	 *
	 * Also note that kqueue's closef still needs to reference the
	 * fdp via p->p_fd, so we have to close the descriptors before
	 * we replace p->p_fd.
	 */
	for (i = 0; i <= fdp->fd_lastfile; ++i) {
		if (fdp->fd_files[i].fp) {
			fp = funsetfd_locked(fdp, i);
			if (fp) {
				spin_unlock(&fdp->fd_spin);
				if (SLIST_FIRST(&fp->f_klist))
					knote_fdclose(fp, fdp, i);
				closef(fp, p);
				spin_lock(&fdp->fd_spin);
			}
		}
	}
	spin_unlock(&fdp->fd_spin);

	/*
	 * Interlock against allproc scan operations (typically frevoke).
	 */
	spin_lock(&p->p_spin);
	p->p_fd = repl;
	spin_unlock(&p->p_spin);

	/*
	 * Wait for any softrefs to go away.  This race rarely occurs so
	 * we can use a non-critical-path style poll/sleep loop.  The
	 * race only occurs against allproc scans.
	 *
	 * No new softrefs can occur with the fdp disconnected from the
	 * process.
	 */
	if (fdp->fd_softrefs) {
		kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid);
		while (fdp->fd_softrefs)
			tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1);
	}

	if (fdp->fd_files != fdp->fd_builtin_files)
		kfree(fdp->fd_files, M_FILEDESC);
	if (fdp->fd_cdir) {
		cache_drop(&fdp->fd_ncdir);
		vrele(fdp->fd_cdir);
	}
	if (fdp->fd_rdir) {
		cache_drop(&fdp->fd_nrdir);
		vrele(fdp->fd_rdir);
	}
	if (fdp->fd_jdir) {
		cache_drop(&fdp->fd_njdir);
		vrele(fdp->fd_jdir);
	}
	kfree(fdp, M_FILEDESC);
}

/*
 * Retrieve and reference the file pointer associated with a descriptor.
 *
 * td must be the current thread.
 */
struct file *
holdfp(thread_t td, int fd, int flag)
{
	struct file *fp;

	fp = _holdfp_cache(td, fd);
	if (fp) {
		if ((fp->f_flag & flag) == 0 && flag != -1) {
			fdrop(fp);
			fp = NULL;
		}
	}
	return fp;
}
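
/*
 * Illustrative sketch (hypothetical, for exposition): the typical caller
 * pattern for holdfp().  The returned fp is referenced and must be
 * balanced with fdrop(); passing flag = -1 skips the FREAD/FWRITE access
 * check, as sys_flock() further below does:
 *
 *	struct file *fp;
 *
 *	if ((fp = holdfp(curthread, fd, FREAD)) == NULL)
 *		return (EBADF);
 *	(... operate on the held fp ...)
 *	fdrop(fp);
 */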

/*
 * holdsock() - load the struct file pointer associated
 * with a socket into *fpp.  If an error occurs, non-zero
 * will be returned and *fpp will be set to NULL.
 *
 * td must be the current thread.
 */
int
holdsock(thread_t td, int fd, struct file **fpp)
{
	struct file *fp;
	int error;

	/*
	 * Lockless shortcut
	 */
	fp = _holdfp_cache(td, fd);
	if (fp) {
		if (fp->f_type != DTYPE_SOCKET) {
			fdrop(fp);
			fp = NULL;
			error = ENOTSOCK;
		} else {
			error = 0;
		}
	} else {
		error = EBADF;
	}
	*fpp = fp;

	return (error);
}

/*
 * Convert a user file descriptor to a held file pointer.
 *
 * td must be the current thread.
 */
int
holdvnode(thread_t td, int fd, struct file **fpp)
{
	struct file *fp;
	int error;

	fp = _holdfp_cache(td, fd);
	if (fp) {
		if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
			fdrop(fp);
			fp = NULL;
			error = EINVAL;
		} else {
			error = 0;
		}
	} else {
		error = EBADF;
	}
	*fpp = fp;

	return (error);
}

/*
 * For setugid programs, we don't want people to use that setugidness
 * to generate error messages which write to a file that would otherwise
 * be off-limits to the process.
 *
 * This is a gross hack to plug the hole.  A better solution would involve
 * a special vop or other form of generalized access control mechanism.  We
 * go ahead and just reject all procfs file system accesses as dangerous.
 *
 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
 * sufficient.  We also don't check for setugidness since we know we are.
 */
static int
is_unsafe(struct file *fp)
{
	if (fp->f_type == DTYPE_VNODE &&
	    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
		return (1);
	return (0);
}

/*
 * Make this setugid thing safe, if at all possible.
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
void
setugidsafety(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * note: fdp->fd_files may be reallocated out from under us while
	 * we are blocked in a close.  Be careful!
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (i > 2)
			break;
		if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) {
			struct file *fp;

			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			if ((fp = funsetfd_locked(fdp, i)) != NULL) {
				knote_fdclose(fp, fdp, i);
				closef(fp, p);
			}
		}
	}
}

/*
 * Close all CLOEXEC files on exec.
 *
 * Only a single thread remains for the current process.
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
void
fdcloseexec(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * We cannot cache fd_files since operations may block and rip
	 * them out from under us.
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (fdp->fd_files[i].fp != NULL &&
		    (fdp->fd_files[i].fileflags & UF_EXCLOSE)) {
			struct file *fp;

			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 *
			 * (funsetfd*() also clears the fd cache)
			 */
			if ((fp = funsetfd_locked(fdp, i)) != NULL) {
				knote_fdclose(fp, fdp, i);
				closef(fp, p);
			}
		}
	}
}

/*
 * It is unsafe for set[ug]id processes to be started with file
 * descriptors 0..2 closed, as these descriptors are given implicit
 * significance in the Standard C library.  fdcheckstd() will create a
 * descriptor referencing /dev/null for each of stdin, stdout, and
 * stderr that is not already open.
 *
 * NOT MPSAFE - calls falloc, vn_open, etc
 */
int
fdcheckstd(struct lwp *lp)
{
	struct nlookupdata nd;
	struct filedesc *fdp;
	struct file *fp;
	int retval;
	int i, error, flags, devnull;

	fdp = lp->lwp_proc->p_fd;
	if (fdp == NULL)
		return (0);
	devnull = -1;
	error = 0;
	for (i = 0; i < 3; i++) {
		if (fdp->fd_files[i].fp != NULL)
			continue;
		if (devnull < 0) {
			if ((error = falloc(lp, &fp, &devnull)) != 0)
				break;

			error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
					     NLC_FOLLOW|NLC_LOCKVP);
			flags = FREAD | FWRITE;
			if (error == 0)
				error = vn_open(&nd, fp, flags, 0);
			if (error == 0)
				fsetfd(fdp, fp, devnull);
			else
				fsetfd(fdp, NULL, devnull);
			fdrop(fp);
			nlookup_done(&nd);
			if (error)
				break;
			KKASSERT(i == devnull);
		} else {
			error = kern_dup(DUP_FIXED, devnull, i, &retval);
			if (error != 0)
				break;
		}
	}
	return (error);
}

/*
 * Internal form of close.
 * Decrement reference count on file structure.
 * Note: td and/or p may be NULL when closing a file
 * that was being passed in a message.
 *
 * MPALMOSTSAFE - acquires mplock for VOP operations
 */
int
closef(struct file *fp, struct proc *p)
{
	struct vnode *vp;
	struct flock lf;
	struct filedesc_to_leader *fdtol;

	if (fp == NULL)
		return (0);

	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor.
	 */
	if (p != NULL && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
	) {
		if (p->p_leader->p_advlock_flag) {
			lf.l_whence = SEEK_SET;
			lf.l_start = 0;
			lf.l_len = 0;
			lf.l_type = F_UNLCK;
			vp = (struct vnode *)fp->f_data;
			VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
				    &lf, F_POSIX);
		}
		fdtol = p->p_fdtol;
		if (fdtol != NULL) {
			lwkt_gettoken(&p->p_token);

			/*
			 * Handle special case where file descriptor table
			 * is shared between multiple process leaders.
			 */
			for (fdtol = fdtol->fdl_next;
			     fdtol != p->p_fdtol;
			     fdtol = fdtol->fdl_next) {
				if (fdtol->fdl_leader->p_advlock_flag == 0)
					continue;
				fdtol->fdl_holdcount++;
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = (struct vnode *)fp->f_data;
				VOP_ADVLOCK(vp, (caddr_t)fdtol->fdl_leader,
					    F_UNLCK, &lf, F_POSIX);
				fdtol->fdl_holdcount--;
				if (fdtol->fdl_holdcount == 0 &&
				    fdtol->fdl_wakeup != 0) {
					fdtol->fdl_wakeup = 0;
					wakeup(fdtol);
				}
			}
			lwkt_reltoken(&p->p_token);
		}
	}
	return (fdrop(fp));
}

/*
 * fhold() can only be called if f_count is already at least 1 (i.e. the
 * caller of fhold() already has a reference to the file pointer in some
 * manner or other).
 *
 * Atomic ops are used for incrementing and decrementing f_count before
 * the 1->0 transition.  The 1->0 transition itself is special; see the
 * comment in fdrop().
 */
void
fhold(struct file *fp)
{
	/* 0->1 transition will never work */
	KASSERT(fp->f_count > 0, ("fhold: invalid f_count %d", fp->f_count));
	atomic_add_int(&fp->f_count, 1);
}

/*
 * fdrop() - drop a reference to a descriptor
 */
int
fdrop(struct file *fp)
{
	struct flock lf;
	struct vnode *vp;
	int error, do_free = 0;

	/*
	 * NOTE: A simple atomic_fetchadd_int(f_count, -1) here would
	 * allow a use-after-free or double free if fhold() is called
	 * on an fp found through filehead iteration, because the
	 * decision to free would be made after a racing f_count 0->1
	 * transition.
	 */
	for (;;) {
		int count = fp->f_count;

		cpu_ccfence();
		KASSERT(count > 0, ("fdrop: invalid f_count %d", count));
		if (count == 1) {
			struct filelist_head *head = fp2filelist(fp);

			/*
			 * About to drop the last reference.  Hold the
			 * filehead spin lock while dropping it, so that
			 * no one can see this fp through filehead anymore,
			 * let alone fhold() this fp.
			 */
			spin_lock(&head->spin);
			if (atomic_cmpset_int(&fp->f_count, count, 0)) {
				LIST_REMOVE(fp, f_list);
				spin_unlock(&head->spin);
				atomic_subtract_int(&nfiles, 1);
				do_free = 1;	/* free this fp */
				break;
			}
			spin_unlock(&head->spin);
			/* retry */
		} else if (atomic_cmpset_int(&fp->f_count, count, count - 1)) {
			break;
		}
		/* retry */
	}
	if (!do_free)
		return (0);

	KKASSERT(SLIST_FIRST(&fp->f_klist) == NULL);

	/*
	 * The last reference has gone away, we own the fp structure free
	 * and clear.
	 */
	if (fp->f_count < 0)
		panic("fdrop: count < 0");
	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
	) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		vp = (struct vnode *)fp->f_data;
		VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
	}
	if (fp->f_ops != &badfileops)
		error = fo_close(fp);
	else
		error = 0;
	ffree(fp);
	return (error);
}
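
/*
 * Illustrative sketch (for exposition only): reduced to its skeleton,
 * the loop above is the generic "last reference is special" refcount
 * pattern:
 *
 *	for (;;) {
 *		count = fp->f_count;
 *		cpu_ccfence();
 *		if (count == 1) {
 *			(acquire the list bucket lock, then cmpset 1 -> 0;
 *			 on success the fp is unreachable and can be freed)
 *		} else if (atomic_cmpset_int(&fp->f_count, count,
 *					     count - 1)) {
 *			break;
 *		}
 *	}
 *
 * A plain atomic decrement would decide to free only after the count
 * had already reached zero, leaving a window in which a filehead
 * scanner could fhold() an fp that is about to be destroyed.
 */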

/*
 * Apply an advisory lock on a file descriptor.
 *
 * Just attempt to get a record lock of the requested type on
 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
 *
 * MPALMOSTSAFE
 */
int
sys_flock(struct flock_args *uap)
{
	thread_t td = curthread;
	struct file *fp;
	struct vnode *vp;
	struct flock lf;
	int error;

	if ((fp = holdfp(td, uap->fd, -1)) == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_VNODE) {
		error = EOPNOTSUPP;
		goto done;
	}
	vp = (struct vnode *)fp->f_data;
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	if (uap->how & LOCK_UN) {
		lf.l_type = F_UNLCK;
		atomic_clear_int(&fp->f_flag, FHASLOCK); /* race ok */
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
		goto done;
	}
	if (uap->how & LOCK_EX)
		lf.l_type = F_WRLCK;
	else if (uap->how & LOCK_SH)
		lf.l_type = F_RDLCK;
	else {
		error = EBADF;
		goto done;
	}
	if (uap->how & LOCK_NB)
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0);
	else
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT);
	atomic_set_int(&fp->f_flag, FHASLOCK);	/* race ok */
done:
	fdrop(fp);
	return (error);
}
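
/*
 * Illustrative sketch (hypothetical, for exposition): the userland view
 * of the syscall above.  LOCK_NB selects the F_SETLK-without-F_WAIT
 * path, so a contested lock surfaces as EWOULDBLOCK instead of
 * sleeping:
 *
 *	#include <sys/file.h>
 *	#include <errno.h>
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) < 0) {
 *		if (errno == EWOULDBLOCK)
 *			(someone else holds the lock)
 *	}
 *	(... critical section ...)
 *	flock(fd, LOCK_UN);
 */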

/*
 * File Descriptor pseudo-device driver (/dev/fd/).
 *
 * Opening minor device N dup()s the file (if any) connected to file
 * descriptor N belonging to the calling process.  Note that this driver
 * consists of only the ``open()'' routine, because all subsequent
 * references to this file will be direct to the other driver.
 */
static int
fdopen(struct dev_open_args *ap)
{
	thread_t td = curthread;

	KKASSERT(td->td_lwp != NULL);

	/*
	 * XXX Kludge: set curlwp->lwp_dupfd to contain the value of the
	 * file descriptor being sought for duplication.  The error
	 * return ensures that the vnode for this device will be released
	 * by vn_open.  Open will detect this special error and take the
	 * actions in dupfdopen below.  Other callers of vn_open or VOP_OPEN
	 * will simply report the error.
	 */
	td->td_lwp->lwp_dupfd = minor(ap->a_head.a_dev);
	return (ENODEV);
}

/*
 * The caller has reserved the file descriptor dfd for us.  On success we
 * must fsetfd() it.  On failure the caller will clean it up.
 */
int
dupfdopen(thread_t td, int dfd, int sfd, int mode, int error)
{
	struct filedesc *fdp;
	struct file *wfp;
	struct file *xfp;
	int werror;

	if ((wfp = holdfp(td, sfd, -1)) == NULL)
		return (EBADF);

	/*
	 * Close a revoke/dup race.  Duping a descriptor marked as revoked
	 * will dup a dummy descriptor instead of the real one.
	 */
	if (wfp->f_flag & FREVOKED) {
		kprintf("Warning: attempt to dup() a revoked descriptor\n");
		fdrop(wfp);
		wfp = NULL;
		werror = falloc(NULL, &wfp, NULL);
		if (werror)
			return (werror);
	}

	fdp = td->td_proc->p_fd;

	/*
	 * There are two cases of interest here.
	 *
	 * For ENODEV simply dup sfd to file descriptor dfd and return.
	 *
	 * For ENXIO steal away the file structure from sfd and store it
	 * in dfd.  sfd is effectively closed by this operation.
	 *
	 * Any other error code is just returned.
	 */
	switch (error) {
	case ENODEV:
		/*
		 * Check that the mode the file is being opened for is a
		 * subset of the mode of the existing descriptor.
		 */
		if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
			error = EACCES;
			break;
		}
		spin_lock(&fdp->fd_spin);
		fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
		fsetfd_locked(fdp, wfp, dfd);
		spin_unlock(&fdp->fd_spin);
		error = 0;
		break;
	case ENXIO:
		/*
		 * Steal away the file pointer from sfd and stuff it
		 * into dfd.
		 */
		spin_lock(&fdp->fd_spin);
		fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
		fsetfd_locked(fdp, wfp, dfd);
		if ((xfp = funsetfd_locked(fdp, sfd)) != NULL) {
			spin_unlock(&fdp->fd_spin);
			fdrop(xfp);
		} else {
			spin_unlock(&fdp->fd_spin);
		}
		error = 0;
		break;
	default:
		break;
	}
	fdrop(wfp);
	return (error);
}
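
/*
 * Illustrative sketch (hypothetical, for exposition): the complete
 * /dev/fd flow that ties fdopen() and dupfdopen() together.  From
 * userland:
 *
 *	fd2 = open("/dev/fd/0", O_RDONLY);
 *
 * fdopen() stashes the minor number (0) in lwp_dupfd and fails the open
 * with ENODEV; the open path recognizes the magic error, calls
 * dupfdopen() with the already-reserved descriptor, and the ENODEV case
 * above then dups descriptor 0 into fd2.
 */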

/*
 * NOT MPSAFE - I think these refer to a common file descriptor table
 * and we need to spinlock that to link fdtol in.
 */
struct filedesc_to_leader *
filedesc_to_leader_alloc(struct filedesc_to_leader *old,
			 struct proc *leader)
{
	struct filedesc_to_leader *fdtol;

	fdtol = kmalloc(sizeof(struct filedesc_to_leader),
			M_FILEDESC_TO_LEADER, M_WAITOK | M_ZERO);
	fdtol->fdl_refcount = 1;
	fdtol->fdl_holdcount = 0;
	fdtol->fdl_wakeup = 0;
	fdtol->fdl_leader = leader;
	if (old != NULL) {
		fdtol->fdl_next = old->fdl_next;
		fdtol->fdl_prev = old;
		old->fdl_next = fdtol;
		fdtol->fdl_next->fdl_prev = fdtol;
	} else {
		fdtol->fdl_next = fdtol;
		fdtol->fdl_prev = fdtol;
	}
	return fdtol;
}

/*
 * Scan all file pointers in the system.  The callback is made with
 * the relevant list bucket's spinlock held exclusively.
 */
void
allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
{
	int i;

	for (i = 0; i < NFILELIST_HEADS; ++i) {
		struct filelist_head *head = &filelist_heads[i];
		struct file *fp;

		spin_lock(&head->spin);
		LIST_FOREACH(fp, &head->list, f_list) {
			int res;

			res = callback(fp, data);
			if (res < 0)
				break;
		}
		spin_unlock(&head->spin);
	}
}
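
/*
 * Illustrative sketch (hypothetical, for exposition): a callback for the
 * scanner above.  The callback runs with the bucket spinlock held, so it
 * must not block; returning a negative value stops the scan of the
 * current bucket:
 *
 *	static int
 *	count_sockets_callback(struct file *fp, void *data)
 *	{
 *		int *countp = data;
 *
 *		if (fp->f_type == DTYPE_SOCKET)
 *			++(*countp);
 *		return (0);
 *	}
 *
 *	int count = 0;
 *	allfiles_scan_exclusive(count_sockets_callback, &count);
 */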

/*
 * Get file structures.
 *
 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe)
 */

struct sysctl_kern_file_info {
	int count;
	int error;
	struct sysctl_req *req;
};

static int sysctl_kern_file_callback(struct proc *p, void *data);

static int
sysctl_kern_file(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_kern_file_info info;

	/*
	 * Note: because the number of file descriptors is calculated
	 * in different ways for sizing vs returning the data,
	 * there is information leakage from the first loop.  However,
	 * it is of a similar order of magnitude to the leakage from
	 * global system statistics such as kern.openfiles.
	 *
	 * When just doing a count, note that we cannot just count
	 * the elements and add f_count via the filehead list because
	 * threaded processes share their descriptor table and f_count might
	 * still be '1' in that case.
	 *
	 * Since the SYSCTL op can block, we must hold the process to
	 * prevent it being ripped out from under us either in the
	 * file descriptor loop or in the greater LIST_FOREACH.  The
	 * process may be in varying states of disrepair.  If the process
	 * is in SZOMB we may have caught it just as it is being removed
	 * from the allproc list, we must skip it in that case to maintain
	 * an unbroken chain through the allproc list.
	 */
	info.count = 0;
	info.error = 0;
	info.req = req;
	allproc_scan(sysctl_kern_file_callback, &info, 0);

	/*
	 * When just calculating the size, overestimate a bit to try to
	 * prevent system activity from causing the buffer-fill call
	 * to fail later on.
	 */
	if (req->oldptr == NULL) {
		info.count = (info.count + 16) + (info.count / 10);
		info.error = SYSCTL_OUT(req, NULL,
					info.count * sizeof(struct kinfo_file));
	}
	return (info.error);
}

static int
sysctl_kern_file_callback(struct proc *p, void *data)
{
	struct sysctl_kern_file_info *info = data;
	struct kinfo_file kf;
	struct filedesc *fdp;
	struct file *fp;
	uid_t uid;
	int n;

	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return(0);
	if (!PRISON_CHECK(info->req->td->td_ucred, p->p_ucred))
		return(0);

	/*
	 * Softref the fdp to prevent it from being destroyed
	 */
	spin_lock(&p->p_spin);
	if ((fdp = p->p_fd) == NULL) {
		spin_unlock(&p->p_spin);
		return(0);
	}
	atomic_add_int(&fdp->fd_softrefs, 1);
	spin_unlock(&p->p_spin);

	/*
	 * The fdp's own spinlock prevents the contents from being
	 * modified.
	 */
	spin_lock_shared(&fdp->fd_spin);
	for (n = 0; n < fdp->fd_nfiles; ++n) {
		if ((fp = fdp->fd_files[n].fp) == NULL)
			continue;
		if (info->req->oldptr == NULL) {
			++info->count;
		} else {
			uid = p->p_ucred ? p->p_ucred->cr_uid : -1;
			kcore_make_file(&kf, fp, p->p_pid, uid, n);
			spin_unlock_shared(&fdp->fd_spin);
			info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf));
			spin_lock_shared(&fdp->fd_spin);
			if (info->error)
				break;
		}
	}
	spin_unlock_shared(&fdp->fd_spin);
	atomic_subtract_int(&fdp->fd_softrefs, 1);
	if (info->error)
		return(-1);
	return(0);
}

SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
	    0, 0, sysctl_kern_file, "S,file", "Entire file table");

SYSCTL_INT(_kern, OID_AUTO, minfilesperproc, CTLFLAG_RW,
	   &minfilesperproc, 0, "Minimum files allowed open per process");
SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
	   &maxfilesperproc, 0, "Maximum files allowed open per process");
SYSCTL_INT(_kern, OID_AUTO, maxfilesperuser, CTLFLAG_RW,
	   &maxfilesperuser, 0, "Maximum files allowed open per user");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
	   &maxfiles, 0, "Maximum number of files");

SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
	   &maxfilesrootres, 0, "Descriptors reserved for root use");

SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
	   &nfiles, 0, "System-wide number of open files");
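
/*
 * Illustrative sketch (hypothetical, for exposition): these knobs are
 * read from userland with sysctl(3), e.g.:
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *
 *	int openfiles;
 *	size_t len = sizeof(openfiles);
 *
 *	if (sysctlbyname("kern.openfiles", &openfiles, &len, NULL, 0) == 0)
 *		printf("%d files open system-wide\n", openfiles);
 */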

static void
fildesc_drvinit(void *unused)
{
	int fd;

	for (fd = 0; fd < NUMFDESC; fd++) {
		make_dev(&fildesc_ops, fd,
			 UID_BIN, GID_BIN, 0666, "fd/%d", fd);
	}

	make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
	make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
	make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
}

struct fileops badfileops = {
	.fo_read = badfo_readwrite,
	.fo_write = badfo_readwrite,
	.fo_ioctl = badfo_ioctl,
	.fo_kqfilter = badfo_kqfilter,
	.fo_stat = badfo_stat,
	.fo_close = badfo_close,
	.fo_shutdown = badfo_shutdown
};

int
badfo_readwrite(
	struct file *fp,
	struct uio *uio,
	struct ucred *cred,
	int flags
) {
	return (EBADF);
}

int
badfo_ioctl(struct file *fp, u_long com, caddr_t data,
	    struct ucred *cred, struct sysmsg *msgv)
{
	return (EBADF);
}

/*
 * Must return an error to prevent registration, typically
 * due to a revoked descriptor (file_filtops assigned).
 */
int
badfo_kqfilter(struct file *fp, struct knote *kn)
{
	return (EOPNOTSUPP);
}

int
badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	return (EBADF);
}

int
badfo_close(struct file *fp)
{
	return (EBADF);
}

int
badfo_shutdown(struct file *fp, int how)
{
	return (EBADF);
}

int
nofo_shutdown(struct file *fp, int how)
{
	return (EOPNOTSUPP);
}

SYSINIT(fildescdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR,
	fildesc_drvinit, NULL);

static void
filelist_heads_init(void *arg __unused)
{
	int i;

	for (i = 0; i < NFILELIST_HEADS; ++i) {
		struct filelist_head *head = &filelist_heads[i];

		spin_init(&head->spin, "filehead_spin");
		LIST_INIT(&head->list);
	}
}

SYSINIT(filelistheads, SI_BOOT1_LOCK, SI_ORDER_ANY,
	filelist_heads_init, NULL);

static void
file_objcache_init(void *dummy __unused)
{
	file_objcache = objcache_create("file", maxfiles, maxfiles / 8,
	    NULL, NULL, NULL,
	    objcache_malloc_alloc, objcache_malloc_free, &file_malloc_args);
}
SYSINIT(fpobjcache, SI_BOOT2_POST_SMP, SI_ORDER_ANY, file_objcache_init, NULL);