/*
 * Copyright (c) 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1982, 1986, 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_descrip.c	8.6 (Berkeley) 4/19/94
 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
 * $DragonFly: src/sys/kern/kern_descrip.c,v 1.79 2008/08/31 13:18:28 aggelos Exp $
 */
#include "opt_compat.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/device.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>
#include <sys/nlookup.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/unistd.h>
#include <sys/resourcevar.h>
#include <sys/event.h>
#include <sys/kern_syscall.h>
#include <sys/kcore.h>
#include <sys/kinfo.h>

#include <vm/vm_extern.h>

#include <sys/thread2.h>
#include <sys/file2.h>
#include <sys/spinlock2.h>
static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd);
static void fdreserve_locked (struct filedesc *fdp, int fd0, int incr);
static struct file *funsetfd_locked (struct filedesc *fdp, int fd);
static int checkfpclosed(struct filedesc *fdp, int fd, struct file *fp);
static void ffree(struct file *fp);

static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
		     "file desc to leader structures");
MALLOC_DEFINE(M_FILE, "file", "Open file structure");
static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");

static d_open_t fdopen;
#define CDEV_MAJOR 22
static struct dev_ops fildesc_ops = {
	{ "FD", CDEV_MAJOR, 0 },
	.d_open =	fdopen,
};
static int badfo_readwrite (struct file *fp, struct uio *uio,
			    struct ucred *cred, int flags);
static int badfo_ioctl (struct file *fp, u_long com, caddr_t data,
			struct ucred *cred);
static int badfo_poll (struct file *fp, int events, struct ucred *cred);
static int badfo_kqfilter (struct file *fp, struct knote *kn);
static int badfo_stat (struct file *fp, struct stat *sb, struct ucred *cred);
static int badfo_close (struct file *fp);
static int badfo_shutdown (struct file *fp, int how);
/*
 * Descriptor management.
 */
static struct filelist filehead = LIST_HEAD_INITIALIZER(&filehead);
static struct spinlock filehead_spin = SPINLOCK_INITIALIZER(&filehead_spin);
static int nfiles;		/* actual number of open files */
/*
 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared.
 *
 * MPSAFE - must be called with fdp->fd_spin exclusively held
 */
static __inline void
fdfixup_locked(struct filedesc *fdp, int fd)
{
	if (fd < fdp->fd_freefile) {
		fdp->fd_freefile = fd;
	}
	while (fdp->fd_lastfile >= 0 &&
	       fdp->fd_files[fdp->fd_lastfile].fp == NULL &&
	       fdp->fd_files[fdp->fd_lastfile].reserved == 0) {
		--fdp->fd_lastfile;
	}
}
/*
 * System calls on descriptors.
 */
int
sys_getdtablesize(struct getdtablesize_args *uap)
{
	struct proc *p = curproc;
	struct plimit *limit = p->p_limit;

	spin_lock_rd(&limit->p_spin);
	uap->sysmsg_result =
	    min((int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
	spin_unlock_rd(&limit->p_spin);
	return (0);
}
/*
 * Duplicate a file descriptor to a particular value.
 *
 * note: keep in mind that a potential race condition exists when closing
 * descriptors from a shared descriptor table (via rfork).
 */
int
sys_dup2(struct dup2_args *uap)
{
	int error;

	error = kern_dup(DUP_FIXED, uap->from, uap->to, uap->sysmsg_fds);

	return (error);
}

/*
 * Duplicate a file descriptor.
 */
int
sys_dup(struct dup_args *uap)
{
	int error;

	error = kern_dup(DUP_VARIABLE, uap->fd, 0, uap->sysmsg_fds);

	return (error);
}
/*
 * MPALMOSTSAFE - acquires mplock for fp operations
 */
int
kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct vnode *vp;
	int oflags;
	int newmin;
	int tmp, error, flg = F_POSIX;

	error = 0;

	/*
	 * Operations on file descriptors that do not require a file pointer.
	 */
	switch (cmd) {
	case F_GETFD:
		error = fgetfdflags(p->p_fd, fd, &tmp);
		if (error == 0)
			dat->fc_cloexec = (tmp & UF_EXCLOSE) ? FD_CLOEXEC : 0;
		return (error);

	case F_SETFD:
		if (dat->fc_cloexec & FD_CLOEXEC)
			error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
		else
			error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
		return (error);

	case F_DUPFD:
		newmin = dat->fc_fd;
		error = kern_dup(DUP_VARIABLE, fd, newmin, &dat->fc_fd);
		return (error);

	default:
		break;
	}

	/*
	 * Operations on file pointers
	 */
	if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
		return (EBADF);

	switch (cmd) {
	case F_GETFL:
		dat->fc_flags = OFLAGS(fp->f_flag);
		error = 0;
		break;

	case F_SETFL:
		oflags = fp->f_flag & FCNTLFLAGS;
		fp->f_flag &= ~FCNTLFLAGS;
		fp->f_flag |= FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
		error = 0;
		if ((fp->f_flag ^ oflags) & FASYNC) {
			tmp = fp->f_flag & FASYNC;
			error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, cred);
		}
		if (error)
			fp->f_flag = (fp->f_flag & ~FCNTLFLAGS) | oflags;
		break;

	case F_GETOWN:
		error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner, cred);
		break;

	case F_SETOWN:
		error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner, cred);
		break;

	case F_SETLKW:
		flg |= F_WAIT;
		/* Fall into F_SETLK */

	case F_SETLK:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;

		/*
		 * copyin/lockop may block
		 */
		if (dat->fc_flock.l_whence == SEEK_CUR)
			dat->fc_flock.l_start += fp->f_offset;

		switch (dat->fc_flock.l_type) {
		case F_RDLCK:
			if ((fp->f_flag & FREAD) == 0) {
				error = EBADF;
				break;
			}
			p->p_leader->p_flag |= P_ADVLOCK;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
					    &dat->fc_flock, flg);
			break;
		case F_WRLCK:
			if ((fp->f_flag & FWRITE) == 0) {
				error = EBADF;
				break;
			}
			p->p_leader->p_flag |= P_ADVLOCK;
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
					    &dat->fc_flock, flg);
			break;
		case F_UNLCK:
			error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
					    &dat->fc_flock, F_POSIX);
			break;
		default:
			error = EINVAL;
			break;
		}

		/*
		 * It is possible to race a close() on the descriptor while
		 * we were blocked getting the lock.  If this occurs the
		 * close might not have caught the lock.
		 */
		if (checkfpclosed(p->p_fd, fd, fp)) {
			dat->fc_flock.l_whence = SEEK_SET;
			dat->fc_flock.l_start = 0;
			dat->fc_flock.l_len = 0;
			dat->fc_flock.l_type = F_UNLCK;
			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
					   F_UNLCK, &dat->fc_flock, F_POSIX);
		}
		break;

	case F_GETLK:
		if (fp->f_type != DTYPE_VNODE) {
			error = EBADF;
			break;
		}
		vp = (struct vnode *)fp->f_data;

		/*
		 * copyin/lockop may block
		 */
		if (dat->fc_flock.l_type != F_RDLCK &&
		    dat->fc_flock.l_type != F_WRLCK &&
		    dat->fc_flock.l_type != F_UNLCK) {
			error = EINVAL;
			break;
		}
		if (dat->fc_flock.l_whence == SEEK_CUR)
			dat->fc_flock.l_start += fp->f_offset;
		error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
				    &dat->fc_flock, F_POSIX);
		break;

	default:
		error = EINVAL;
		break;
	}
	fdrop(fp);
	return (error);
}
/*
 * The file control system call.
 */
int
sys_fcntl(struct fcntl_args *uap)
{
	union fcntl_dat dat;
	int error;

	switch (uap->cmd) {
	case F_DUPFD:
		dat.fc_fd = uap->arg;
		break;
	case F_SETFD:
		dat.fc_cloexec = uap->arg;
		break;
	case F_SETFL:
		dat.fc_flags = uap->arg;
		break;
	case F_SETOWN:
		dat.fc_owner = uap->arg;
		break;
	case F_SETLKW:
	case F_SETLK:
	case F_GETLK:
		error = copyin((caddr_t)uap->arg, &dat.fc_flock,
			       sizeof(struct flock));
		if (error)
			return (error);
		break;
	}

	error = kern_fcntl(uap->fd, uap->cmd, &dat, curproc->p_ucred);

	if (error == 0) {
		switch (uap->cmd) {
		case F_DUPFD:
			uap->sysmsg_result = dat.fc_fd;
			break;
		case F_GETFD:
			uap->sysmsg_result = dat.fc_cloexec;
			break;
		case F_GETFL:
			uap->sysmsg_result = dat.fc_flags;
			break;
		case F_GETOWN:
			uap->sysmsg_result = dat.fc_owner;
			break;
		case F_GETLK:
			error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
					sizeof(struct flock));
			break;
		}
	}
	return (error);
}
/*
 * Common code for dup, dup2, and fcntl(F_DUPFD).
 *
 * The type flag can be either DUP_FIXED or DUP_VARIABLE.  DUP_FIXED tells
 * kern_dup() to destructively dup over an existing file descriptor if new
 * is already open.  DUP_VARIABLE tells kern_dup() to find the lowest
 * unused file descriptor that is greater than or equal to new.
 */
int
kern_dup(enum dup_type type, int old, int new, int *res)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp = p->p_fd;
	struct file *fp;
	struct file *delfp;
	int oldflags;
	int holdleaders;
	int error, newfd;

	/*
	 * Verify that we have a valid descriptor to dup from and
	 * possibly to dup to.
	 */
retry:
	spin_lock_wr(&fdp->fd_spin);
	if (new < 0 || new > p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
	    new >= maxfilesperproc) {
		spin_unlock_wr(&fdp->fd_spin);
		return (EINVAL);
	}
	if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
		spin_unlock_wr(&fdp->fd_spin);
		return (EBADF);
	}
	if (type == DUP_FIXED && old == new) {
		*res = new;
		spin_unlock_wr(&fdp->fd_spin);
		return (0);
	}
	fp = fdp->fd_files[old].fp;
	oldflags = fdp->fd_files[old].fileflags;
	fhold(fp);	/* MPSAFE - can be called with a spinlock held */

	/*
	 * Allocate a new descriptor if DUP_VARIABLE, or expand the table
	 * if the requested descriptor is beyond the current table size.
	 *
	 * This can block.  Retry if the source descriptor no longer matches
	 * or if our expectation in the expansion case races.
	 *
	 * If we are not expanding or allocating a new descriptor, then reset
	 * the target descriptor to a reserved state so we have a uniform
	 * setup for the next code block.
	 */
	if (type == DUP_VARIABLE || new >= fdp->fd_nfiles) {
		spin_unlock_wr(&fdp->fd_spin);
		error = fdalloc(p, new, &newfd);
		spin_lock_wr(&fdp->fd_spin);
		if (error) {
			spin_unlock_wr(&fdp->fd_spin);
			fdrop(fp);
			return (error);
		}
		/*
		 * Check for ripout
		 */
		if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock_wr(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		/*
		 * Check for expansion race
		 */
		if (type != DUP_VARIABLE && new != newfd) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock_wr(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		/*
		 * Check for ripout, newfd reused old (this case probably
		 * can't occur).
		 */
		if (old == newfd) {
			fsetfd_locked(fdp, NULL, newfd);
			spin_unlock_wr(&fdp->fd_spin);
			fdrop(fp);
			goto retry;
		}
		new = newfd;
		delfp = NULL;
	} else {
		if (fdp->fd_files[new].reserved) {
			spin_unlock_wr(&fdp->fd_spin);
			fdrop(fp);
			kprintf("Warning: dup(): target descriptor %d is reserved, waiting for it to be resolved\n", new);
			tsleep(fdp, 0, "fdres", hz);
			goto retry;
		}

		/*
		 * If the target descriptor was never allocated we have
		 * to allocate it.  If it was we have to clean out the
		 * old descriptor.  delfp inherits the ref from the
		 * descriptor table.
		 */
		delfp = fdp->fd_files[new].fp;
		fdp->fd_files[new].fp = NULL;
		fdp->fd_files[new].reserved = 1;
		if (delfp == NULL) {
			fdreserve_locked(fdp, new, 1);
			if (new > fdp->fd_lastfile)
				fdp->fd_lastfile = new;
		}
	}

	/*
	 * NOTE: still holding an exclusive spinlock
	 */

	/*
	 * If a descriptor is being overwritten we may have to tell
	 * fdfree() to sleep to ensure that all relevant process
	 * leaders can be traversed in closef().
	 */
	if (delfp != NULL && p->p_fdtol != NULL) {
		fdp->fd_holdleaderscount++;
		holdleaders = 1;
	} else {
		holdleaders = 0;
	}
	KASSERT(delfp == NULL || type == DUP_FIXED,
		("dup() picked an open file"));

	/*
	 * Duplicate the source descriptor, update lastfile.  If the new
	 * descriptor was not allocated and we aren't replacing an existing
	 * descriptor we have to mark the descriptor as being in use.
	 *
	 * The fd_files[] array inherits fp's hold reference.
	 */
	fsetfd_locked(fdp, fp, new);
	fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
	spin_unlock_wr(&fdp->fd_spin);
	*res = new;

	/*
	 * If we dup'd over a valid file, we now own the reference to it
	 * and must dispose of it using closef() semantics (as if a
	 * close() were performed on it).
	 */
	if (delfp) {
		closef(delfp, p);
		if (holdleaders) {
			spin_lock_wr(&fdp->fd_spin);
			fdp->fd_holdleaderscount--;
			if (fdp->fd_holdleaderscount == 0 &&
			    fdp->fd_holdleaderswakeup != 0) {
				fdp->fd_holdleaderswakeup = 0;
				spin_unlock_wr(&fdp->fd_spin);
				wakeup(&fdp->fd_holdleaderscount);
			} else {
				spin_unlock_wr(&fdp->fd_spin);
			}
		}
	}
	return (0);
}
/*
 * If sigio is on the list associated with a process or process group,
 * disable signalling from the device, remove sigio from the list and
 * free sigio.
 */
void
funsetown(struct sigio *sigio)
{
	if (sigio == NULL)
		return;
	*(sigio->sio_myref) = NULL;
	if (sigio->sio_pgid < 0) {
		SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
			     sigio, sio_pgsigio);
	} else /* if ((*sigiop)->sio_pgid > 0) */ {
		SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
			     sigio, sio_pgsigio);
	}
	crfree(sigio->sio_ucred);
	kfree(sigio, M_SIGIO);
}
/* Free a list of sigio structures. */
void
funsetownlst(struct sigiolst *sigiolst)
{
	struct sigio *sigio;

	while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
		funsetown(sigio);
}
/*
 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
 *
 * After permission checking, add a sigio structure to the sigio list for
 * the process or process group.
 */
int
fsetown(pid_t pgid, struct sigio **sigiop)
{
	struct proc *proc = NULL;
	struct pgrp *pgrp = NULL;
	struct sigio *sigio;

	if (pgid == 0) {
		funsetown(*sigiop);
		return (0);
	}
	if (pgid > 0) {
		proc = pfind(pgid);
		if (proc == NULL)
			return (ESRCH);

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (proc->p_session != curproc->p_session)
			return (EPERM);
	} else /* if (pgid < 0) */ {
		pgrp = pgfind(-pgid);
		if (pgrp == NULL)
			return (ESRCH);

		/*
		 * Policy - Don't allow a process to FSETOWN a process
		 * in another session.
		 *
		 * Remove this test to allow maximum flexibility or
		 * restrict FSETOWN to the current process or process
		 * group for maximum safety.
		 */
		if (pgrp->pg_session != curproc->p_session)
			return (EPERM);
	}
	funsetown(*sigiop);
	sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
	if (pgid > 0) {
		SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
		sigio->sio_proc = proc;
	} else {
		SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
		sigio->sio_pgrp = pgrp;
	}
	sigio->sio_pgid = pgid;
	sigio->sio_ucred = crhold(curproc->p_ucred);
	/* It would be convenient if p_ruid was in ucred. */
	sigio->sio_ruid = curproc->p_ucred->cr_ruid;
	sigio->sio_myref = sigiop;
	*sigiop = sigio;
	return (0);
}
/*
 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
 */
pid_t
fgetown(struct sigio *sigio)
{
	return (sigio != NULL ? sigio->sio_pgid : 0);
}
/*
 * Close many file descriptors.
 */
int
sys_closefrom(struct closefrom_args *uap)
{
	return(kern_closefrom(uap->fd));
}

/*
 * Close all file descriptors greater than or equal to fd
 */
int
kern_closefrom(int fd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;

	fdp = p->p_fd;

	/*
	 * NOTE: This function will skip unassociated descriptors and
	 *	 reserved descriptors that have not yet been assigned.
	 *	 fd_lastfile can change as a side effect of kern_close().
	 */
	spin_lock_wr(&fdp->fd_spin);
	while (fd <= fdp->fd_lastfile) {
		if (fdp->fd_files[fd].fp != NULL) {
			spin_unlock_wr(&fdp->fd_spin);
			/* ok if this races another close */
			if (kern_close(fd) == EINTR)
				return (EINTR);
			spin_lock_wr(&fdp->fd_spin);
		}
		++fd;
	}
	spin_unlock_wr(&fdp->fd_spin);
	return (0);
}
/*
 * Close a file descriptor.
 */
int
sys_close(struct close_args *uap)
{
	return(kern_close(uap->fd));
}

/*
 * MPALMOSTSAFE - acquires mplock around knote_fdclose() calls
 */
int
kern_close(int fd)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct filedesc *fdp;
	struct file *fp;
	int error;
	int holdleaders;

	fdp = p->p_fd;

	spin_lock_wr(&fdp->fd_spin);
	if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
		spin_unlock_wr(&fdp->fd_spin);
		return (EBADF);
	}
	holdleaders = 0;
	if (p->p_fdtol != NULL) {
		/*
		 * Ask fdfree() to sleep to ensure that all relevant
		 * process leaders can be traversed in closef().
		 */
		fdp->fd_holdleaderscount++;
		holdleaders = 1;
	}

	/*
	 * we now hold the fp reference that used to be owned by the descriptor
	 * array.
	 */
	spin_unlock_wr(&fdp->fd_spin);
	if (fd < fdp->fd_knlistsize) {
		get_mplock();
		if (fd < fdp->fd_knlistsize)
			knote_fdclose(p, fd);
		rel_mplock();
	}
	error = closef(fp, p);
	if (holdleaders) {
		spin_lock_wr(&fdp->fd_spin);
		fdp->fd_holdleaderscount--;
		if (fdp->fd_holdleaderscount == 0 &&
		    fdp->fd_holdleaderswakeup != 0) {
			fdp->fd_holdleaderswakeup = 0;
			spin_unlock_wr(&fdp->fd_spin);
			wakeup(&fdp->fd_holdleaderscount);
		} else {
			spin_unlock_wr(&fdp->fd_spin);
		}
	}
	return (error);
}
/*
 * shutdown_args(int fd, int how)
 */
int
kern_shutdown(int fd, int how)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
		return (EBADF);
	error = fo_shutdown(fp, how);
	fdrop(fp);

	return (error);
}

int
sys_shutdown(struct shutdown_args *uap)
{
	int error;

	error = kern_shutdown(uap->s, uap->how);

	return (error);
}
int
kern_fstat(int fd, struct stat *ub)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	int error;

	if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
		return (EBADF);
	error = fo_stat(fp, ub, p->p_ucred);
	fdrop(fp);

	return (error);
}

/*
 * Return status information about a file descriptor.
 */
int
sys_fstat(struct fstat_args *uap)
{
	struct stat st;
	int error;

	error = kern_fstat(uap->fd, &st);

	if (error == 0)
		error = copyout(&st, uap->sb, sizeof(st));
	return (error);
}
/*
 * Return pathconf information about a file descriptor.
 */
int
sys_fpathconf(struct fpathconf_args *uap)
{
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	struct file *fp;
	struct vnode *vp;
	int error = 0;

	if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL)
		return (EBADF);

	switch (fp->f_type) {
	case DTYPE_PIPE:
	case DTYPE_SOCKET:
		if (uap->name != _PC_PIPE_BUF) {
			error = EINVAL;
		} else {
			uap->sysmsg_result = PIPE_BUF;
			error = 0;
		}
		break;
	case DTYPE_FIFO:
	case DTYPE_VNODE:
		vp = (struct vnode *)fp->f_data;
		error = VOP_PATHCONF(vp, uap->name, uap->sysmsg_fds);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
	fdrop(fp);
	return (error);
}
static int fdexpand;
SYSCTL_INT(_debug, OID_AUTO, fdexpand, CTLFLAG_RD, &fdexpand, 0, "");

/*
 * Grow the file table so it can hold through descriptor (want).
 *
 * The fdp's spinlock must be held exclusively on entry and may be held
 * exclusively on return.  The spinlock may be cycled by the routine.
 */
static void
fdgrow_locked(struct filedesc *fdp, int want)
{
	struct fdnode *newfiles;
	struct fdnode *oldfiles;
	int nf, extra;

	nf = fdp->fd_nfiles;
	do {
		/* nf has to be of the form 2^n - 1 */
		nf = 2 * nf + 1;
	} while (nf <= want);

	spin_unlock_wr(&fdp->fd_spin);
	newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
	spin_lock_wr(&fdp->fd_spin);

	/*
	 * We could have raced another extend while we were not holding
	 * the spinlock.
	 */
	if (fdp->fd_nfiles >= nf) {
		spin_unlock_wr(&fdp->fd_spin);
		kfree(newfiles, M_FILEDESC);
		spin_lock_wr(&fdp->fd_spin);
		return;
	}
	/*
	 * Copy the existing ofile and ofileflags arrays
	 * and zero the new portion of each array.
	 */
	extra = nf - fdp->fd_nfiles;
	bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
	bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));

	oldfiles = fdp->fd_files;
	fdp->fd_files = newfiles;
	fdp->fd_nfiles = nf;

	if (oldfiles != fdp->fd_builtin_files) {
		spin_unlock_wr(&fdp->fd_spin);
		kfree(oldfiles, M_FILEDESC);
		spin_lock_wr(&fdp->fd_spin);
	}
	fdexpand++;
}
/*
 * Number of nodes in right subtree, including the root.
 */
static __inline int
right_subtree_size(int n)
{
	return (n ^ (n | (n + 1)));
}

static __inline int
right_ancestor(int n)
{
	return (n | (n + 1));
}

static __inline int
left_ancestor(int n)
{
	return ((n & (n + 1)) - 1);
}
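/*
 * Worked example of the in-place tree arithmetic above (illustration only;
 * the values follow directly from the three helpers):
 *
 *	right_subtree_size(2) = 2 ^ (2|3) = 1	(node 2 is a leaf)
 *	right_subtree_size(5) = 5 ^ (5|6) = 2
 *	right_subtree_size(3) = 3 ^ (3|4) = 4
 *
 *	right_ancestor(2) = 2|3 = 3	left_ancestor(2) = (2&3)-1 = 1
 *	right_ancestor(4) = 4|5 = 5	left_ancestor(5) = (5&6)-1 = 3
 *	right_ancestor(5) = 5|6 = 7	left_ancestor(3) = (3&4)-1 = -1
 *
 * fdreserve_locked() below walks left_ancestor() until it goes negative,
 * so reserving descriptor 5 bumps the allocated counts at nodes 5 and 3
 * and then stops, since left_ancestor(3) == -1.
 */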
/*
 * Traverse the in-place binary tree bottom-up adjusting the allocation
 * count so scans can determine where free descriptors are located.
 *
 * MPSAFE - caller must be holding an exclusive spinlock on fdp
 */
static void
fdreserve_locked(struct filedesc *fdp, int fd, int incr)
{
	while (fd >= 0) {
		fdp->fd_files[fd].allocated += incr;
		KKASSERT(fdp->fd_files[fd].allocated >= 0);
		fd = left_ancestor(fd);
	}
}
/*
 * Reserve a file descriptor for the process.  If no error occurs, the
 * caller MUST at some point call fsetfd() or assign a file pointer
 * or dispose of the reservation.
 */
int
fdalloc(struct proc *p, int want, int *result)
{
	struct filedesc *fdp = p->p_fd;
	int fd, rsize, rsum, node, lim;

	spin_lock_rd(&p->p_limit->p_spin);
	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
	spin_unlock_rd(&p->p_limit->p_spin);
	if (want >= lim)
		return (EMFILE);
	spin_lock_wr(&fdp->fd_spin);
	if (want >= fdp->fd_nfiles)
		fdgrow_locked(fdp, want);

	/*
	 * Search for a free descriptor starting at the higher
	 * of want or fd_freefile.  If that fails, consider
	 * expanding the ofile array.
	 *
	 * NOTE! the 'allocated' field is a cumulative recursive allocation
	 * count.  If we happen to see a value of 0 then we can shortcut
	 * our search.  Otherwise we run through the tree going
	 * down branches we know have free descriptor(s) until we hit a
	 * leaf node.  The leaf node will be free but will not necessarily
	 * have an allocated field of 0.
	 */
retry:
	/* move up the tree looking for a subtree with a free node */
	for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
	     fd = right_ancestor(fd)) {
		if (fdp->fd_files[fd].allocated == 0)
			goto found;

		rsize = right_subtree_size(fd);
		if (fdp->fd_files[fd].allocated == rsize)
			continue;	/* right subtree full */

		/*
		 * Free fd is in the right subtree of the tree rooted at fd.
		 * Call that subtree R.  Look for the smallest (leftmost)
		 * subtree of R with an unallocated fd: continue moving
		 * down the left branch until encountering a full left
		 * subtree, then move to the right.
		 */
		for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
			node = fd + rsize;
			rsum += fdp->fd_files[node].allocated;
			if (fdp->fd_files[fd].allocated == rsum + rsize) {
				fd = node;	/* move to the right */
				if (fdp->fd_files[node].allocated == 0)
					goto found;
				rsum = 0;
			}
		}
		goto found;
	}

	/*
	 * No space in current array.  Expand?
	 */
	if (fdp->fd_nfiles >= lim) {
		spin_unlock_wr(&fdp->fd_spin);
		return (EMFILE);
	}
	fdgrow_locked(fdp, want);
	goto retry;

found:
	KKASSERT(fd < fdp->fd_nfiles);
	if (fd > fdp->fd_lastfile)
		fdp->fd_lastfile = fd;
	if (want <= fdp->fd_freefile)
		fdp->fd_freefile = fd;
	*result = fd;
	KKASSERT(fdp->fd_files[fd].fp == NULL);
	KKASSERT(fdp->fd_files[fd].reserved == 0);
	fdp->fd_files[fd].fileflags = 0;
	fdp->fd_files[fd].reserved = 1;
	fdreserve_locked(fdp, fd, 1);
	spin_unlock_wr(&fdp->fd_spin);
	return (0);
}
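/*
 * Usage sketch for the reservation protocol described above (illustration
 * only; it mirrors the falloc()/fdcheckstd() callers later in this file).
 * A successful fdalloc() leaves fd_files[fd].reserved set, and the caller
 * must resolve the reservation one way or the other:
 *
 *	if (fdalloc(p, 0, &fd) == 0) {
 *		if (setup_succeeded)		// hypothetical condition
 *			fsetfd(p, fp, fd);	// associate the file pointer
 *		else
 *			fsetfd(p, NULL, fd);	// return fd to the pool
 *	}
 */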
/*
 * Check to see whether n user file descriptors
 * are available to the process p.
 */
int
fdavail(struct proc *p, int n)
{
	struct filedesc *fdp = p->p_fd;
	struct fdnode *fdnode;
	int i, lim, last;

	spin_lock_rd(&p->p_limit->p_spin);
	lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
	spin_unlock_rd(&p->p_limit->p_spin);

	spin_lock_rd(&fdp->fd_spin);
	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
		spin_unlock_rd(&fdp->fd_spin);
		return (1);
	}
	last = min(fdp->fd_nfiles, lim);
	fdnode = &fdp->fd_files[fdp->fd_freefile];
	for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) {
		if (fdnode->fp == NULL && --n <= 0) {
			spin_unlock_rd(&fdp->fd_spin);
			return (1);
		}
	}
	spin_unlock_rd(&fdp->fd_spin);
	return (0);
}
/*
 * Revoke open descriptors referencing (f_data, f_type)
 *
 * Any revoke executed within a prison is only able to
 * revoke descriptors for processes within that prison.
 *
 * Returns 0 on success or an error code.
 */
struct fdrevoke_info {
	void *data;
	short type;
	short unused;
	int count;
	struct ucred *cred;
	struct file *nfp;
};

static int fdrevoke_check_callback(struct file *fp, void *vinfo);
static int fdrevoke_proc_callback(struct proc *p, void *vinfo);

int
fdrevoke(void *f_data, short f_type, struct ucred *cred)
{
	struct fdrevoke_info info;
	int error;

	bzero(&info, sizeof(info));
	info.data = f_data;
	info.type = f_type;
	info.cred = cred;
	error = falloc(NULL, &info.nfp, NULL);
	if (error)
		return (error);

	/*
	 * Scan the file pointer table once.  dups do not dup file pointers,
	 * only descriptors, so there is no leak.  Set FREVOKED on the fps
	 * found.
	 */
	allfiles_scan_exclusive(fdrevoke_check_callback, &info);

	/*
	 * If any fps were marked track down the related descriptors
	 * and close them.  Any dup()s at this point will notice
	 * the FREVOKED already set in the fp and do the right thing.
	 *
	 * Any fps with non-zero msgcounts (aka sent over a unix-domain
	 * socket) bumped the intransit counter and will require a
	 * scan.  Races against fps leaving the socket are closed by
	 * the socket code checking for FREVOKED.
	 */
	if (info.count)
		allproc_scan(fdrevoke_proc_callback, &info);
	unp_revoke_gc(info.nfp);
	fdrop(info.nfp);
	return (0);
}
/*
 * Locate matching file pointers directly.
 */
static int
fdrevoke_check_callback(struct file *fp, void *vinfo)
{
	struct fdrevoke_info *info = vinfo;

	/*
	 * File pointers already flagged for revocation are skipped.
	 */
	if (fp->f_flag & FREVOKED)
		return(0);

	/*
	 * If revoking from a prison file pointers created outside of
	 * that prison, or file pointers without creds, cannot be revoked.
	 */
	if (info->cred->cr_prison &&
	    (fp->f_cred == NULL ||
	     info->cred->cr_prison != fp->f_cred->cr_prison)) {
		return(0);
	}

	/*
	 * If the file pointer matches then mark it for revocation.  The
	 * flag is currently only used by unp_revoke_gc().
	 *
	 * info->count is a heuristic and can race in a SMP environment.
	 */
	if (info->data == fp->f_data && info->type == fp->f_type) {
		atomic_set_int(&fp->f_flag, FREVOKED);
		info->count += fp->f_count;
	}
	return(0);
}
/*
 * Locate matching file pointers via process descriptor tables.
 */
static int
fdrevoke_proc_callback(struct proc *p, void *vinfo)
{
	struct fdrevoke_info *info = vinfo;
	struct filedesc *fdp;
	struct file *fp;
	int n;

	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return(0);
	if (info->cred->cr_prison &&
	    info->cred->cr_prison != p->p_ucred->cr_prison) {
		return(0);
	}

	/*
	 * If the controlling terminal of the process matches the
	 * vnode being revoked we clear the controlling terminal.
	 *
	 * The normal spec_close() may not catch this because it
	 * uses curproc instead of p.
	 */
	if (p->p_session && info->type == DTYPE_VNODE &&
	    info->data == p->p_session->s_ttyvp) {
		p->p_session->s_ttyvp = NULL;
	}

	/*
	 * Locate and close any matching file descriptors.
	 */
	if ((fdp = p->p_fd) == NULL)
		return(0);
	spin_lock_wr(&fdp->fd_spin);
	for (n = 0; n < fdp->fd_nfiles; ++n) {
		if ((fp = fdp->fd_files[n].fp) == NULL)
			continue;
		if (fp->f_flag & FREVOKED) {
			fhold(info->nfp);
			fdp->fd_files[n].fp = info->nfp;
			spin_unlock_wr(&fdp->fd_spin);
			closef(fp, p);
			spin_lock_wr(&fdp->fd_spin);
		}
	}
	spin_unlock_wr(&fdp->fd_spin);
	return(0);
}
/*
 * Create a new open file structure and reserve a file descriptor
 * for the process that refers to it.
 *
 * Root creds are checked using p, or assumed if p is NULL.  If
 * resultfd is non-NULL then p must also be non-NULL.  No file
 * descriptor is reserved if resultfd is NULL.
 *
 * A file pointer with a refcount of 1 is returned.  Note that the
 * file pointer is NOT associated with the descriptor.  If falloc
 * returns success, fsetfd() MUST be called to either associate the
 * file pointer or clear the reservation.
 */
int
falloc(struct proc *p, struct file **resultfp, int *resultfd)
{
	static struct timeval lastfail;
	static int curfail;
	struct file *fp;
	int error;

	fp = NULL;

	/*
	 * Handle filetable full issues and root overfill.
	 */
	if (nfiles >= maxfiles - maxfilesrootres &&
	    ((p && p->p_ucred->cr_ruid != 0) || nfiles >= maxfiles)) {
		if (ppsratecheck(&lastfail, &curfail, 1)) {
			kprintf("kern.maxfiles limit exceeded by uid %d, please see tuning(7).\n",
				(p ? p->p_ucred->cr_ruid : -1));
		}
		error = ENFILE;
		goto done;
	}

	/*
	 * Allocate a new file descriptor.
	 */
	fp = kmalloc(sizeof(struct file), M_FILE, M_WAITOK | M_ZERO);
	spin_init(&fp->f_spin);
	fp->f_count = 1;
	fp->f_ops = &badfileops;
	if (p)
		fp->f_cred = crhold(p->p_ucred);
	else
		fp->f_cred = crhold(proc0.p_ucred);
	spin_lock_wr(&filehead_spin);
	nfiles++;
	LIST_INSERT_HEAD(&filehead, fp, f_list);
	spin_unlock_wr(&filehead_spin);
	if (resultfd) {
		if ((error = fdalloc(p, 0, resultfd)) != 0) {
			fdrop(fp);
			fp = NULL;
		}
	} else {
		error = 0;
	}
done:
	*resultfp = fp;
	return (error);
}
static int
checkfpclosed(struct filedesc *fdp, int fd, struct file *fp)
{
	int closed;

	spin_lock_rd(&fdp->fd_spin);
	if ((unsigned) fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp)
		closed = 1;
	else
		closed = 0;
	spin_unlock_rd(&fdp->fd_spin);
	return (closed);
}
/*
 * Associate a file pointer with a previously reserved file descriptor.
 * This function always succeeds.
 *
 * If fp is NULL, the file descriptor is returned to the pool.
 *
 * MPSAFE (exclusive spinlock must be held on call)
 */
static void
fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd)
{
	KKASSERT((unsigned)fd < fdp->fd_nfiles);
	KKASSERT(fdp->fd_files[fd].reserved != 0);
	if (fp) {
		fdp->fd_files[fd].fp = fp;
		fdp->fd_files[fd].reserved = 0;
		if (fp->f_type == DTYPE_KQUEUE) {
			if (fdp->fd_knlistsize < 0)
				fdp->fd_knlistsize = 0;
		}
	} else {
		fdp->fd_files[fd].reserved = 0;
		fdreserve_locked(fdp, fd, -1);
		fdfixup_locked(fdp, fd);
	}
}

void
fsetfd(struct proc *p, struct file *fp, int fd)
{
	struct filedesc *fdp = p->p_fd;

	spin_lock_wr(&fdp->fd_spin);
	fsetfd_locked(fdp, fp, fd);
	spin_unlock_wr(&fdp->fd_spin);
}
/*
 * MPSAFE (exclusive spinlock must be held on call)
 */
static struct file *
funsetfd_locked(struct filedesc *fdp, int fd)
{
	struct file *fp;

	if ((unsigned)fd >= fdp->fd_nfiles)
		return (NULL);
	if ((fp = fdp->fd_files[fd].fp) == NULL)
		return (NULL);
	fdp->fd_files[fd].fp = NULL;
	fdp->fd_files[fd].fileflags = 0;

	fdreserve_locked(fdp, fd, -1);
	fdfixup_locked(fdp, fd);
	return (fp);
}
int
fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
{
	int error;

	spin_lock_rd(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		*flagsp = fdp->fd_files[fd].fileflags;
		error = 0;
	}
	spin_unlock_rd(&fdp->fd_spin);
	return (error);
}

int
fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
{
	int error;

	spin_lock_wr(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		fdp->fd_files[fd].fileflags |= add_flags;
		error = 0;
	}
	spin_unlock_wr(&fdp->fd_spin);
	return (error);
}

int
fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
{
	int error;

	spin_lock_wr(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		error = EBADF;
	} else if (fdp->fd_files[fd].fp == NULL) {
		error = EBADF;
	} else {
		fdp->fd_files[fd].fileflags &= ~rem_flags;
		error = 0;
	}
	spin_unlock_wr(&fdp->fd_spin);
	return (error);
}
void
fsetcred(struct file *fp, struct ucred *cr)
{
	crhold(cr);
	crfree(fp->f_cred);
	fp->f_cred = cr;
}
/*
 * Free a file descriptor.
 */
static void
ffree(struct file *fp)
{
	KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
	spin_lock_wr(&filehead_spin);
	LIST_REMOVE(fp, f_list);
	nfiles--;
	spin_unlock_wr(&filehead_spin);
	crfree(fp->f_cred);
	if (fp->f_nchandle.ncp)
		cache_drop(&fp->f_nchandle);
	kfree(fp, M_FILE);
}
/*
 * called from init_main, initialize filedesc0 for proc0.
 */
void
fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask)
{
	p0->p_fd = fdp0;
	fdp0->fd_refcnt = 1;
	fdp0->fd_cmask = cmask;
	fdp0->fd_files = fdp0->fd_builtin_files;
	fdp0->fd_nfiles = NDFILE;
	fdp0->fd_lastfile = -1;
	spin_init(&fdp0->fd_spin);
}
/*
 * Build a new filedesc structure.
 */
struct filedesc *
fdinit(struct proc *p)
{
	struct filedesc *newfdp;
	struct filedesc *fdp = p->p_fd;

	newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO);
	spin_lock_rd(&fdp->fd_spin);
	if (fdp->fd_cdir) {
		newfdp->fd_cdir = fdp->fd_cdir;
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
	}

	/*
	 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of
	 * proc0, but should unconditionally exist in other processes.
	 */
	if (fdp->fd_rdir) {
		newfdp->fd_rdir = fdp->fd_rdir;
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	}
	if (fdp->fd_jdir) {
		newfdp->fd_jdir = fdp->fd_jdir;
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	}
	spin_unlock_rd(&fdp->fd_spin);

	/* Create the file descriptor table. */
	newfdp->fd_refcnt = 1;
	newfdp->fd_cmask = cmask;
	newfdp->fd_files = newfdp->fd_builtin_files;
	newfdp->fd_nfiles = NDFILE;
	newfdp->fd_knlistsize = -1;
	newfdp->fd_lastfile = -1;
	spin_init(&newfdp->fd_spin);

	return (newfdp);
}
/*
 * Share a filedesc structure.
 */
struct filedesc *
fdshare(struct proc *p)
{
	struct filedesc *fdp;

	fdp = p->p_fd;
	spin_lock_wr(&fdp->fd_spin);
	fdp->fd_refcnt++;
	spin_unlock_wr(&fdp->fd_spin);
	return (fdp);
}
/*
 * Copy a filedesc structure.
 */
struct filedesc *
fdcopy(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct filedesc *newfdp;
	struct fdnode *fdnode;
	int i;
	int ni;

	/*
	 * Certain daemons might not have file descriptors.
	 */
	if (fdp == NULL)
		return (NULL);

	/*
	 * Allocate the new filedesc and fd_files[] array.  This can race
	 * with operations by other threads on the fdp so we have to be
	 * careful.
	 */
	newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK | M_ZERO);
again:
	spin_lock_rd(&fdp->fd_spin);
	if (fdp->fd_lastfile < NDFILE) {
		newfdp->fd_files = newfdp->fd_builtin_files;
		i = NDFILE;
	} else {
		/*
		 * We have to allocate (2^N-1) entries for our in-place
		 * binary tree.  Allow the table to shrink.
		 */
		i = fdp->fd_nfiles;
		ni = (i - 1) / 2;
		while (ni > fdp->fd_lastfile && ni > NDFILE) {
			i = ni;
			ni = (i - 1) / 2;
		}
		spin_unlock_rd(&fdp->fd_spin);
		newfdp->fd_files = kmalloc(i * sizeof(struct fdnode),
					   M_FILEDESC, M_WAITOK | M_ZERO);

		/*
		 * Check for race, retry
		 */
		spin_lock_rd(&fdp->fd_spin);
		if (i <= fdp->fd_lastfile) {
			spin_unlock_rd(&fdp->fd_spin);
			kfree(newfdp->fd_files, M_FILEDESC);
			goto again;
		}
	}

	/*
	 * Dup the remaining fields.  vref() and cache_hold() can be
	 * safely called while holding the read spinlock on fdp.
	 *
	 * The read spinlock on fdp is still being held.
	 *
	 * NOTE: vref and cache_hold calls for the case where the vnode
	 * or cache entry already has at least one ref may be called
	 * while holding spin locks.
	 */
	if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) {
		vref(newfdp->fd_cdir);
		cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
	}
	/*
	 * We must check for fd_rdir here, at least for now because
	 * the init process is created before we have access to the
	 * rootvnode to take a reference to it.
	 */
	if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) {
		vref(newfdp->fd_rdir);
		cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
	}
	if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) {
		vref(newfdp->fd_jdir);
		cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
	}
	newfdp->fd_refcnt = 1;
	newfdp->fd_nfiles = i;
	newfdp->fd_lastfile = fdp->fd_lastfile;
	newfdp->fd_freefile = fdp->fd_freefile;
	newfdp->fd_cmask = fdp->fd_cmask;
	newfdp->fd_knlist = NULL;
	newfdp->fd_knlistsize = -1;
	newfdp->fd_knhash = NULL;
	newfdp->fd_knhashmask = 0;
	spin_init(&newfdp->fd_spin);

	/*
	 * Copy the descriptor table through (i).  This also copies the
	 * allocation state.  Then go through and ref the file pointers
	 * and clean up any KQ descriptors.
	 *
	 * kq descriptors cannot be copied.  Since we haven't ref'd the
	 * copied files yet we can ignore the return value from funsetfd().
	 *
	 * The read spinlock on fdp is still being held.
	 */
	bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode));
	for (i = 0 ; i < newfdp->fd_nfiles; ++i) {
		fdnode = &newfdp->fd_files[i];
		if (fdnode->reserved) {
			fdreserve_locked(newfdp, i, -1);
			fdnode->reserved = 0;
			fdfixup_locked(newfdp, i);
		} else if (fdnode->fp) {
			if (fdnode->fp->f_type == DTYPE_KQUEUE) {
				(void)funsetfd_locked(newfdp, i);
			} else {
				fhold(fdnode->fp);
			}
		}
	}
	spin_unlock_rd(&fdp->fd_spin);
	return (newfdp);
}
/*
 * Release a filedesc structure.
 *
 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE)
 */
void
fdfree(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct fdnode *fdnode;
	struct filedesc_to_leader *fdtol;
	struct file *fp;
	struct vnode *vp;
	struct flock lf;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * Severe messing around to follow
	 */
	spin_lock_wr(&fdp->fd_spin);

	/* Check for special need to clear POSIX style locks */
	fdtol = p->p_fdtol;
	if (fdtol != NULL) {
		KASSERT(fdtol->fdl_refcount > 0,
			("filedesc_to_refcount botch: fdl_refcount=%d",
			 fdtol->fdl_refcount));
		if (fdtol->fdl_refcount == 1 &&
		    (p->p_leader->p_flag & P_ADVLOCK) != 0) {
			for (i = 0; i <= fdp->fd_lastfile; ++i) {
				fdnode = &fdp->fd_files[i];
				if (fdnode->fp == NULL ||
				    fdnode->fp->f_type != DTYPE_VNODE) {
					continue;
				}
				fp = fdnode->fp;
				fhold(fp);
				spin_unlock_wr(&fdp->fd_spin);

				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = (struct vnode *)fp->f_data;
				(void) VOP_ADVLOCK(vp,
						   (caddr_t)p->p_leader,
						   F_UNLCK, &lf, F_POSIX);
				fdrop(fp);
				spin_lock_wr(&fdp->fd_spin);
			}
		}
	retry:
		if (fdtol->fdl_refcount == 1) {
			if (fdp->fd_holdleaderscount > 0 &&
			    (p->p_leader->p_flag & P_ADVLOCK) != 0) {
				/*
				 * close() or do_dup() has cleared a reference
				 * in a shared file descriptor table.
				 */
				fdp->fd_holdleaderswakeup = 1;
				msleep(&fdp->fd_holdleaderscount,
				       &fdp->fd_spin, 0, "fdlhold", 0);
				goto retry;
			}
			if (fdtol->fdl_holdcount > 0) {
				/*
				 * Ensure that fdtol->fdl_leader
				 * remains valid in closef().
				 */
				fdtol->fdl_wakeup = 1;
				msleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0);
				goto retry;
			}
		}
		fdtol->fdl_refcount--;
		if (fdtol->fdl_refcount == 0 &&
		    fdtol->fdl_holdcount == 0) {
			fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
			fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
		} else {
			fdtol = NULL;
		}
		p->p_fdtol = NULL;
		if (fdtol != NULL) {
			spin_unlock_wr(&fdp->fd_spin);
			kfree(fdtol, M_FILEDESC_TO_LEADER);
			spin_lock_wr(&fdp->fd_spin);
		}
	}
	if (--fdp->fd_refcnt > 0) {
		spin_unlock_wr(&fdp->fd_spin);
		return;
	}
	spin_unlock_wr(&fdp->fd_spin);

	/*
	 * we are the last reference to the structure, we can
	 * safely assume it will not change out from under us.
	 */
	for (i = 0; i <= fdp->fd_lastfile; ++i) {
		if (fdp->fd_files[i].fp)
			closef(fdp->fd_files[i].fp, p);
	}
	if (fdp->fd_files != fdp->fd_builtin_files)
		kfree(fdp->fd_files, M_FILEDESC);
	if (fdp->fd_cdir) {
		cache_drop(&fdp->fd_ncdir);
		vrele(fdp->fd_cdir);
	}
	if (fdp->fd_rdir) {
		cache_drop(&fdp->fd_nrdir);
		vrele(fdp->fd_rdir);
	}
	if (fdp->fd_jdir) {
		cache_drop(&fdp->fd_njdir);
		vrele(fdp->fd_jdir);
	}
	if (fdp->fd_knlist)
		kfree(fdp->fd_knlist, M_KQUEUE);
	if (fdp->fd_knhash)
		kfree(fdp->fd_knhash, M_KQUEUE);
	kfree(fdp, M_FILEDESC);
}
/*
 * Retrieve and reference the file pointer associated with a descriptor.
 */
struct file *
holdfp(struct filedesc *fdp, int fd, int flag)
{
	struct file *fp;

	spin_lock_rd(&fdp->fd_spin);
	if (((u_int)fd) >= fdp->fd_nfiles) {
		fp = NULL;
		goto done;
	}
	if ((fp = fdp->fd_files[fd].fp) == NULL)
		goto done;
	if ((fp->f_flag & flag) == 0 && flag != -1) {
		fp = NULL;
		goto done;
	}
	fhold(fp);
done:
	spin_unlock_rd(&fdp->fd_spin);
	return (fp);
}
/*
 * holdsock() - load the struct file pointer associated
 * with a socket into *fpp.  If an error occurs, non-zero
 * will be returned and *fpp will be set to NULL.
 */
int
holdsock(struct filedesc *fdp, int fd, struct file **fpp)
{
	struct file *fp;
	int error;

	spin_lock_rd(&fdp->fd_spin);
	if ((unsigned)fd >= fdp->fd_nfiles) {
		error = EBADF;
		fp = NULL;
		goto done;
	}
	if ((fp = fdp->fd_files[fd].fp) == NULL) {
		error = EBADF;
		goto done;
	}
	if (fp->f_type != DTYPE_SOCKET) {
		error = ENOTSOCK;
		goto done;
	}
	fhold(fp);
	error = 0;
done:
	spin_unlock_rd(&fdp->fd_spin);
	*fpp = fp;
	return (error);
}
/*
 * Convert a user file descriptor to a held file pointer.
 */
int
holdvnode(struct filedesc *fdp, int fd, struct file **fpp)
{
	struct file *fp;
	int error;

	spin_lock_rd(&fdp->fd_spin);
	if ((unsigned)fd >= fdp->fd_nfiles) {
		error = EBADF;
		fp = NULL;
		goto done;
	}
	if ((fp = fdp->fd_files[fd].fp) == NULL) {
		error = EBADF;
		goto done;
	}
	if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
		error = EINVAL;
		goto done;
	}
	fhold(fp);
	error = 0;
done:
	spin_unlock_rd(&fdp->fd_spin);
	*fpp = fp;
	return (error);
}
/*
 * For setugid programs, we don't want people to use that setugidness
 * to generate error messages which write to a file which would otherwise
 * be off-limits to the process.
 *
 * This is a gross hack to plug the hole.  A better solution would involve
 * a special vop or other form of generalized access control mechanism.  We
 * go ahead and just reject all procfs file systems accesses as dangerous.
 *
 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
 * sufficient.  We also don't check for setugidness since we know we are.
 */
static int
is_unsafe(struct file *fp)
{
	if (fp->f_type == DTYPE_VNODE && 
	    ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
		return (1);
	return (0);
}

/*
 * Make this setugid thing safe, if at all possible.
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
void
setugidsafety(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * note: fdp->fd_files may be reallocated out from under us while
	 * we are blocked in a close.  Be careful!
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (i > 2)
			break;
		if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) {
			struct file *fp;

			if (i < fdp->fd_knlistsize)
				knote_fdclose(p, i);
			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			if ((fp = funsetfd_locked(fdp, i)) != NULL)
				closef(fp, p);
		}
	}
}
/*
 * Close any files on exec?
 *
 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
 */
void
fdcloseexec(struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	int i;

	/* Certain daemons might not have file descriptors. */
	if (fdp == NULL)
		return;

	/*
	 * We cannot cache fd_files since operations may block and rip
	 * them out from under us.
	 */
	for (i = 0; i <= fdp->fd_lastfile; i++) {
		if (fdp->fd_files[i].fp != NULL &&
		    (fdp->fd_files[i].fileflags & UF_EXCLOSE)) {
			struct file *fp;

			if (i < fdp->fd_knlistsize)
				knote_fdclose(p, i);
			/*
			 * NULL-out descriptor prior to close to avoid
			 * a race while close blocks.
			 */
			if ((fp = funsetfd_locked(fdp, i)) != NULL)
				closef(fp, p);
		}
	}
}
/*
 * It is unsafe for set[ug]id processes to be started with file
 * descriptors 0..2 closed, as these descriptors are given implicit
 * significance in the Standard C library.  fdcheckstd() will create a
 * descriptor referencing /dev/null for each of stdin, stdout, and
 * stderr that is not already open.
 *
 * NOT MPSAFE - calls falloc, vn_open, etc
 */
int
fdcheckstd(struct proc *p)
{
	struct nlookupdata nd;
	struct filedesc *fdp;
	struct file *fp;
	int retval;
	int i, error, flags, devnull;

	fdp = p->p_fd;
	if (fdp == NULL)
		return (0);
	devnull = -1;
	error = 0;
	for (i = 0; i < 3; i++) {
		if (fdp->fd_files[i].fp != NULL)
			continue;
		if (devnull < 0) {
			if ((error = falloc(p, &fp, &devnull)) != 0)
				break;

			error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
						NLC_FOLLOW|NLC_LOCKVP);
			flags = FREAD | FWRITE;
			if (error == 0)
				error = vn_open(&nd, fp, flags, 0);
			if (error == 0)
				fsetfd(p, fp, devnull);
			else
				fsetfd(p, NULL, devnull);
			nlookup_done(&nd);
			if (error)
				break;
			KKASSERT(i == devnull);
		} else {
			error = kern_dup(DUP_FIXED, devnull, i, &retval);
			if (error != 0)
				break;
		}
	}
	return (error);
}
/*
 * Internal form of close.
 * Decrement reference count on file structure.
 * Note: td and/or p may be NULL when closing a file
 * that was being passed in a message.
 *
 * MPALMOSTSAFE - acquires mplock for VOP operations
 */
int
closef(struct file *fp, struct proc *p)
{
	struct vnode *vp;
	struct flock lf;
	struct filedesc_to_leader *fdtol;

	if (fp == NULL)
		return (0);

	/*
	 * POSIX record locking dictates that any close releases ALL
	 * locks owned by this process.  This is handled by setting
	 * a flag in the unlock to free ONLY locks obeying POSIX
	 * semantics, and not to free BSD-style file locks.
	 * If the descriptor was in a message, POSIX-style locks
	 * aren't passed with the descriptor.
	 */
	if (p != NULL && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
	) {
		if ((p->p_leader->p_flag & P_ADVLOCK) != 0) {
			lf.l_whence = SEEK_SET;
			lf.l_start = 0;
			lf.l_len = 0;
			lf.l_type = F_UNLCK;
			vp = (struct vnode *)fp->f_data;
			(void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
					   &lf, F_POSIX);
		}
		fdtol = p->p_fdtol;
		if (fdtol != NULL) {
			/*
			 * Handle special case where file descriptor table
			 * is shared between multiple process leaders.
			 */
			for (fdtol = fdtol->fdl_next;
			     fdtol != p->p_fdtol;
			     fdtol = fdtol->fdl_next) {
				if ((fdtol->fdl_leader->p_flag &
				     P_ADVLOCK) == 0)
					continue;
				fdtol->fdl_holdcount++;
				lf.l_whence = SEEK_SET;
				lf.l_start = 0;
				lf.l_len = 0;
				lf.l_type = F_UNLCK;
				vp = (struct vnode *)fp->f_data;
				(void) VOP_ADVLOCK(vp,
						   (caddr_t)fdtol->fdl_leader,
						   F_UNLCK, &lf, F_POSIX);
				fdtol->fdl_holdcount--;
				if (fdtol->fdl_holdcount == 0 &&
				    fdtol->fdl_wakeup != 0) {
					fdtol->fdl_wakeup = 0;
					wakeup(fdtol);
				}
			}
		}
	}
	return (fdrop(fp));
}
/*
 * fhold() can only be called if f_count is already at least 1 (i.e. the
 * caller of fhold() already has a reference to the file pointer in some
 * manner or other).
 *
 * f_count is not spin-locked.  Instead, atomic ops are used for
 * incrementing, decrementing, and handling the 1->0 transition.
 */
void
fhold(struct file *fp)
{
	atomic_add_int(&fp->f_count, 1);
}
/*
 * fdrop() - drop a reference to a descriptor
 *
 * MPALMOSTSAFE - acquires mplock for final close sequence
 */
int
fdrop(struct file *fp)
{
	struct flock lf;
	struct vnode *vp;
	int error;

	/*
	 * A combined fetch and subtract is needed to properly detect
	 * 1->0 transitions, otherwise two cpus dropping from a ref
	 * count of 2 might both try to run the 1->0 code.
	 */
	if (atomic_fetchadd_int(&fp->f_count, -1) > 1)
		return (0);

	/*
	 * The last reference has gone away, we own the fp structure free
	 * and clear.
	 */
	if (fp->f_count < 0)
		panic("fdrop: count < 0");
	if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE &&
	    (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
	) {
		lf.l_whence = SEEK_SET;
		lf.l_start = 0;
		lf.l_len = 0;
		lf.l_type = F_UNLCK;
		vp = (struct vnode *)fp->f_data;
		(void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
	}
	if (fp->f_ops != &badfileops)
		error = fo_close(fp);
	else
		error = 0;
	ffree(fp);
	return (error);
}
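/*
 * Note on the 1->0 detection above: atomic_fetchadd_int() returns the
 * value f_count held *before* the subtraction, so exactly one CPU can
 * observe the old value 1 and fall through to the final close/ffree()
 * path; every other concurrent fdrop() sees a larger old value and
 * returns early.
 */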
/*
 * Apply an advisory lock on a file descriptor.
 *
 * Just attempt to get a record lock of the requested type on
 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
 */
int
sys_flock(struct flock_args *uap)
{
	struct proc *p = curproc;
	struct file *fp;
	struct vnode *vp;
	struct flock lf;
	int error;

	if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL)
		return (EBADF);
	if (fp->f_type != DTYPE_VNODE) {
		error = EOPNOTSUPP;
		goto done;
	}
	vp = (struct vnode *)fp->f_data;
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;
	if (uap->how & LOCK_UN) {
		lf.l_type = F_UNLCK;
		fp->f_flag &= ~FHASLOCK;
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
		goto done;
	}
	if (uap->how & LOCK_EX)
		lf.l_type = F_WRLCK;
	else if (uap->how & LOCK_SH)
		lf.l_type = F_RDLCK;
	else {
		error = EBADF;
		goto done;
	}
	fp->f_flag |= FHASLOCK;
	if (uap->how & LOCK_NB)
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0);
	else
		error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT);
done:
	fdrop(fp);
	return (error);
}
/*
 * File Descriptor pseudo-device driver (/dev/fd/).
 *
 * Opening minor device N dup()s the file (if any) connected to file
 * descriptor N belonging to the calling process.  Note that this driver
 * consists of only the ``open()'' routine, because all subsequent
 * references to this file will be direct to the other driver.
 */
static int
fdopen(struct dev_open_args *ap)
{
	thread_t td = curthread;

	KKASSERT(td->td_lwp != NULL);

	/*
	 * XXX Kludge: set curlwp->lwp_dupfd to contain the value of the
	 * file descriptor being sought for duplication.  The error
	 * return ensures that the vnode for this device will be released
	 * by vn_open.  Open will detect this special error and take the
	 * actions in dupfdopen below.  Other callers of vn_open or VOP_OPEN
	 * will simply report the error.
	 */
	td->td_lwp->lwp_dupfd = minor(ap->a_head.a_dev);
	return (ENODEV);
}
/*
 * The caller has reserved the file descriptor dfd for us.  On success we
 * must fsetfd() it.  On failure the caller will clean it up.
 *
 * NOT MPSAFE - isn't getting spinlocks, possibly other things
 */
int
dupfdopen(struct proc *p, int dfd, int sfd, int mode, int error)
{
	struct filedesc *fdp = p->p_fd;
	struct file *wfp;
	struct file *xfp;
	int werror;

	if ((wfp = holdfp(fdp, sfd, -1)) == NULL)
		return (EBADF);

	/*
	 * Close a revoke/dup race.  Duping a descriptor marked as revoked
	 * will dup a dummy descriptor instead of the real one.
	 */
	if (wfp->f_flag & FREVOKED) {
		kprintf("Warning: attempt to dup() a revoked descriptor\n");
		fdrop(wfp);
		wfp = NULL;
		werror = falloc(NULL, &wfp, NULL);
		if (werror)
			return (werror);
	}

	/*
	 * There are two cases of interest here.
	 *
	 * For ENODEV simply dup sfd to file descriptor dfd and return.
	 *
	 * For ENXIO steal away the file structure from sfd and store it
	 * in dfd.  sfd is effectively closed by this operation.
	 *
	 * Any other error code is just returned.
	 */
	switch (error) {
	case ENODEV:
		/*
		 * Check that the mode the file is being opened for is a
		 * subset of the mode of the existing descriptor.
		 */
		if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
			error = EACCES;
			break;
		}
		fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
		fsetfd(p, wfp, dfd);
		error = 0;
		break;
	case ENXIO:
		/*
		 * Steal away the file pointer from sfd, and stuff it into dfd.
		 */
		fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
		fsetfd(p, wfp, dfd);
		if ((xfp = funsetfd_locked(fdp, sfd)) != NULL)
			fdrop(xfp);
		error = 0;
		break;
	default:
		break;
	}
	fdrop(wfp);
	return (error);
}
/*
 * NOT MPSAFE - I think these refer to a common file descriptor table
 * and we need to spinlock that to link fdtol in.
 */
struct filedesc_to_leader *
filedesc_to_leader_alloc(struct filedesc_to_leader *old,
			 struct proc *leader)
{
	struct filedesc_to_leader *fdtol;

	fdtol = kmalloc(sizeof(struct filedesc_to_leader),
			M_FILEDESC_TO_LEADER, M_WAITOK);
	fdtol->fdl_refcount = 1;
	fdtol->fdl_holdcount = 0;
	fdtol->fdl_wakeup = 0;
	fdtol->fdl_leader = leader;
	if (old != NULL) {
		fdtol->fdl_next = old->fdl_next;
		fdtol->fdl_prev = old;
		old->fdl_next = fdtol;
		fdtol->fdl_next->fdl_prev = fdtol;
	} else {
		fdtol->fdl_next = fdtol;
		fdtol->fdl_prev = fdtol;
	}
	return (fdtol);
}
/*
 * Scan all file pointers in the system.  The callback is made with
 * the master list spinlock held exclusively.
 */
void
allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
{
	struct file *fp;
	int res;

	spin_lock_wr(&filehead_spin);
	LIST_FOREACH(fp, &filehead, f_list) {
		res = callback(fp, data);
		if (res < 0)
			break;
	}
	spin_unlock_wr(&filehead_spin);
}
/*
 * Get file structures.
 *
 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe)
 */

struct sysctl_kern_file_info {
	int count;
	int error;
	struct sysctl_req *req;
};

static int sysctl_kern_file_callback(struct proc *p, void *data);

static int
sysctl_kern_file(SYSCTL_HANDLER_ARGS)
{
	struct sysctl_kern_file_info info;

	/*
	 * Note: because the number of file descriptors is calculated
	 * in different ways for sizing vs returning the data,
	 * there is information leakage from the first loop.  However,
	 * it is of a similar order of magnitude to the leakage from
	 * global system statistics such as kern.openfiles.
	 *
	 * When just doing a count, note that we cannot just count
	 * the elements and add f_count via the filehead list because
	 * threaded processes share their descriptor table and f_count might
	 * still be '1' in that case.
	 *
	 * Since the SYSCTL op can block, we must hold the process to
	 * prevent it being ripped out from under us either in the
	 * file descriptor loop or in the greater LIST_FOREACH.  The
	 * process may be in varying states of disrepair.  If the process
	 * is in SZOMB we may have caught it just as it is being removed
	 * from the allproc list, we must skip it in that case to maintain
	 * an unbroken chain through the allproc list.
	 */
	info.count = 0;
	info.error = 0;
	info.req = req;
	allproc_scan(sysctl_kern_file_callback, &info);

	/*
	 * When just calculating the size, overestimate a bit to try to
	 * prevent system activity from causing the buffer-fill call
	 * to fail later on.
	 */
	if (req->oldptr == NULL) {
		info.count = (info.count + 16) + (info.count / 10);
		info.error = SYSCTL_OUT(req, NULL,
					info.count * sizeof(struct kinfo_file));
	}
	return (info.error);
}
static int
sysctl_kern_file_callback(struct proc *p, void *data)
{
	struct sysctl_kern_file_info *info = data;
	struct kinfo_file kf;
	struct filedesc *fdp;
	struct file *fp;
	uid_t uid;
	int n;

	if (p->p_stat == SIDL || p->p_stat == SZOMB)
		return(0);
	if (!PRISON_CHECK(info->req->td->td_proc->p_ucred, p->p_ucred) != 0)
		return(0);
	if ((fdp = p->p_fd) == NULL)
		return(0);
	spin_lock_rd(&fdp->fd_spin);
	for (n = 0; n < fdp->fd_nfiles; ++n) {
		if ((fp = fdp->fd_files[n].fp) == NULL)
			continue;
		if (info->req->oldptr == NULL) {
			++info->count;
		} else {
			uid = p->p_ucred ? p->p_ucred->cr_uid : -1;
			kcore_make_file(&kf, fp, p->p_pid, uid, n);
			spin_unlock_rd(&fdp->fd_spin);
			info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf));
			spin_lock_rd(&fdp->fd_spin);
			if (info->error)
				break;
		}
	}
	spin_unlock_rd(&fdp->fd_spin);
	return(0);
}
SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, 0, sysctl_kern_file, "S,file", "Entire file table");

SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
    &maxfilesperproc, 0, "Maximum files allowed open per process");

SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
    &maxfiles, 0, "Maximum number of files");

SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
    &maxfilesrootres, 0, "Descriptors reserved for root use");

SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
    &nfiles, 0, "System-wide number of open files");
static void
fildesc_drvinit(void *unused)
{
	int fd;

	dev_ops_add(&fildesc_ops, 0, 0);
	for (fd = 0; fd < NUMFDESC; fd++) {
		make_dev(&fildesc_ops, fd,
			 UID_BIN, GID_BIN, 0666, "fd/%d", fd);
	}
	make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
	make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
	make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
}
struct fileops badfileops = {
	.fo_read = badfo_readwrite,
	.fo_write = badfo_readwrite,
	.fo_ioctl = badfo_ioctl,
	.fo_poll = badfo_poll,
	.fo_kqfilter = badfo_kqfilter,
	.fo_stat = badfo_stat,
	.fo_close = badfo_close,
	.fo_shutdown = badfo_shutdown
};
static int
badfo_ioctl(struct file *fp, u_long com, caddr_t data, struct ucred *cred)
{
	return (EBADF);
}

static int
badfo_poll(struct file *fp, int events, struct ucred *cred)
{
	return (0);
}

static int
badfo_kqfilter(struct file *fp, struct knote *kn)
{
	return (0);
}

static int
badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
{
	return (EBADF);
}

static int
badfo_close(struct file *fp)
{
	return (EBADF);
}

static int
badfo_shutdown(struct file *fp, int how)
{
	return (EBADF);
}

int
nofo_shutdown(struct file *fp, int how)
{
	return (EOPNOTSUPP);
}

SYSINIT(fildescdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,
	fildesc_drvinit,NULL)