sys/kern/kern_descrip.c
1 /*
2 * Copyright (c) 2005 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Jeffrey Hsu.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
35 * Copyright (c) 1982, 1986, 1989, 1991, 1993
36 * The Regents of the University of California. All rights reserved.
37 * (c) UNIX System Laboratories, Inc.
38 * All or some portions of this file are derived from material licensed
39 * to the University of California by American Telephone and Telegraph
40 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
41 * the permission of UNIX System Laboratories, Inc.
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 * notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 * notice, this list of conditions and the following disclaimer in the
50 * documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 * must display the following acknowledgement:
53 * This product includes software developed by the University of
54 * California, Berkeley and its contributors.
55 * 4. Neither the name of the University nor the names of its contributors
56 * may be used to endorse or promote products derived from this software
57 * without specific prior written permission.
59 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
62 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
63 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
64 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
65 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
69 * SUCH DAMAGE.
71 * @(#)kern_descrip.c 8.6 (Berkeley) 4/19/94
72 * $FreeBSD: src/sys/kern/kern_descrip.c,v 1.81.2.19 2004/02/28 00:43:31 tegge Exp $
73 * $DragonFly: src/sys/kern/kern_descrip.c,v 1.79 2008/08/31 13:18:28 aggelos Exp $
76 #include "opt_compat.h"
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/malloc.h>
80 #include <sys/sysproto.h>
81 #include <sys/conf.h>
82 #include <sys/device.h>
83 #include <sys/filedesc.h>
84 #include <sys/kernel.h>
85 #include <sys/sysctl.h>
86 #include <sys/vnode.h>
87 #include <sys/proc.h>
88 #include <sys/nlookup.h>
89 #include <sys/file.h>
90 #include <sys/stat.h>
91 #include <sys/filio.h>
92 #include <sys/fcntl.h>
93 #include <sys/unistd.h>
94 #include <sys/resourcevar.h>
95 #include <sys/event.h>
96 #include <sys/kern_syscall.h>
97 #include <sys/kcore.h>
98 #include <sys/kinfo.h>
99 #include <sys/un.h>
101 #include <vm/vm.h>
102 #include <vm/vm_extern.h>
104 #include <sys/thread2.h>
105 #include <sys/file2.h>
106 #include <sys/spinlock2.h>
108 static void fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd);
109 static void fdreserve_locked (struct filedesc *fdp, int fd0, int incr);
110 static struct file *funsetfd_locked (struct filedesc *fdp, int fd);
111 static int checkfpclosed(struct filedesc *fdp, int fd, struct file *fp);
112 static void ffree(struct file *fp);
114 static MALLOC_DEFINE(M_FILEDESC, "file desc", "Open file descriptor table");
115 static MALLOC_DEFINE(M_FILEDESC_TO_LEADER, "file desc to leader",
116 "file desc to leader structures");
117 MALLOC_DEFINE(M_FILE, "file", "Open file structure");
118 static MALLOC_DEFINE(M_SIGIO, "sigio", "sigio structures");
120 static d_open_t fdopen;
121 #define NUMFDESC 64
123 #define CDEV_MAJOR 22
124 static struct dev_ops fildesc_ops = {
125 { "FD", CDEV_MAJOR, 0 },
126 .d_open = fdopen,
129 static int badfo_readwrite (struct file *fp, struct uio *uio,
130 struct ucred *cred, int flags);
131 static int badfo_ioctl (struct file *fp, u_long com, caddr_t data,
132 struct ucred *cred, struct sysmsg *msg);
133 static int badfo_poll (struct file *fp, int events, struct ucred *cred);
134 static int badfo_kqfilter (struct file *fp, struct knote *kn);
135 static int badfo_stat (struct file *fp, struct stat *sb, struct ucred *cred);
136 static int badfo_close (struct file *fp);
137 static int badfo_shutdown (struct file *fp, int how);
140 * Descriptor management.
142 static struct filelist filehead = LIST_HEAD_INITIALIZER(&filehead);
143 static struct spinlock filehead_spin = SPINLOCK_INITIALIZER(&filehead_spin);
144 static int nfiles; /* actual number of open files */
145 extern int cmask;
148 * Fixup fd_freefile and fd_lastfile after a descriptor has been cleared.
150 * MPSAFE - must be called with fdp->fd_spin exclusively held
152 static __inline
153 void
154 fdfixup_locked(struct filedesc *fdp, int fd)
156 if (fd < fdp->fd_freefile) {
157 fdp->fd_freefile = fd;
159 while (fdp->fd_lastfile >= 0 &&
160 fdp->fd_files[fdp->fd_lastfile].fp == NULL &&
161 fdp->fd_files[fdp->fd_lastfile].reserved == 0
163 --fdp->fd_lastfile;
168 * System calls on descriptors.
170 * MPSAFE
173 sys_getdtablesize(struct getdtablesize_args *uap)
175 struct proc *p = curproc;
176 struct plimit *limit = p->p_limit;
178 spin_lock_rd(&limit->p_spin);
179 uap->sysmsg_result =
180 min((int)limit->pl_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
181 spin_unlock_rd(&limit->p_spin);
182 return (0);
186 * Duplicate a file descriptor to a particular value.
188 * note: keep in mind that a potential race condition exists when closing
189 * descriptors from a shared descriptor table (via rfork).
191 * MPSAFE
194 sys_dup2(struct dup2_args *uap)
196 int error;
197 int fd = 0;
199 error = kern_dup(DUP_FIXED, uap->from, uap->to, &fd);
200 uap->sysmsg_fds[0] = fd;
202 return (error);
206 * Duplicate a file descriptor.
208 * MPSAFE
211 sys_dup(struct dup_args *uap)
213 int error;
214 int fd = 0;
216 error = kern_dup(DUP_VARIABLE, uap->fd, 0, &fd);
217 uap->sysmsg_fds[0] = fd;
219 return (error);
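/*
 * Editorial summary (not part of the original source): how the
 * user-visible interfaces in this file map onto kern_dup():
 *
 *	dup(fd)                  -> kern_dup(DUP_VARIABLE, fd, 0, &res)
 *	dup2(from, to)           -> kern_dup(DUP_FIXED, from, to, &res)
 *	fcntl(fd, F_DUPFD, min)  -> kern_dup(DUP_VARIABLE, fd, min, &res)
 */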
223 * MPALMOSTSAFE - acquires mplock for fp operations
226 kern_fcntl(int fd, int cmd, union fcntl_dat *dat, struct ucred *cred)
228 struct thread *td = curthread;
229 struct proc *p = td->td_proc;
230 struct file *fp;
231 struct vnode *vp;
232 u_int newmin;
233 u_int oflags;
234 u_int nflags;
235 int tmp, error, flg = F_POSIX;
237 KKASSERT(p);
240 * Operations on file descriptors that do not require a file pointer.
242 switch (cmd) {
243 case F_GETFD:
244 error = fgetfdflags(p->p_fd, fd, &tmp);
245 if (error == 0)
246 dat->fc_cloexec = (tmp & UF_EXCLOSE) ? FD_CLOEXEC : 0;
247 return (error);
249 case F_SETFD:
250 if (dat->fc_cloexec & FD_CLOEXEC)
251 error = fsetfdflags(p->p_fd, fd, UF_EXCLOSE);
252 else
253 error = fclrfdflags(p->p_fd, fd, UF_EXCLOSE);
254 return (error);
255 case F_DUPFD:
256 newmin = dat->fc_fd;
257 error = kern_dup(DUP_VARIABLE, fd, newmin, &dat->fc_fd);
258 return (error);
259 default:
260 break;
264 * Operations on file pointers
266 if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
267 return (EBADF);
269 get_mplock();
270 switch (cmd) {
271 case F_GETFL:
272 dat->fc_flags = OFLAGS(fp->f_flag);
273 error = 0;
274 break;
276 case F_SETFL:
277 oflags = fp->f_flag;
278 nflags = FFLAGS(dat->fc_flags & ~O_ACCMODE) & FCNTLFLAGS;
279 nflags |= oflags & ~FCNTLFLAGS;
281 error = 0;
282 if (((nflags ^ oflags) & O_APPEND) && (oflags & FAPPENDONLY))
283 error = EINVAL;
284 if (error == 0 && ((nflags ^ oflags) & FASYNC)) {
285 tmp = nflags & FASYNC;
286 error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp,
287 cred, NULL);
289 if (error == 0)
290 fp->f_flag = nflags;
291 break;
293 case F_GETOWN:
294 error = fo_ioctl(fp, FIOGETOWN, (caddr_t)&dat->fc_owner,
295 cred, NULL);
296 break;
298 case F_SETOWN:
299 error = fo_ioctl(fp, FIOSETOWN, (caddr_t)&dat->fc_owner,
300 cred, NULL);
301 break;
303 case F_SETLKW:
304 flg |= F_WAIT;
305 /* Fall into F_SETLK */
307 case F_SETLK:
308 if (fp->f_type != DTYPE_VNODE) {
309 error = EBADF;
310 break;
312 vp = (struct vnode *)fp->f_data;
315 * copyin/lockop may block
317 if (dat->fc_flock.l_whence == SEEK_CUR)
318 dat->fc_flock.l_start += fp->f_offset;
320 switch (dat->fc_flock.l_type) {
321 case F_RDLCK:
322 if ((fp->f_flag & FREAD) == 0) {
323 error = EBADF;
324 break;
326 p->p_leader->p_flag |= P_ADVLOCK;
327 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
328 &dat->fc_flock, flg);
329 break;
330 case F_WRLCK:
331 if ((fp->f_flag & FWRITE) == 0) {
332 error = EBADF;
333 break;
335 p->p_leader->p_flag |= P_ADVLOCK;
336 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_SETLK,
337 &dat->fc_flock, flg);
338 break;
339 case F_UNLCK:
340 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
341 &dat->fc_flock, F_POSIX);
342 break;
343 default:
344 error = EINVAL;
345 break;
349 * It is possible to race a close() on the descriptor while
350 * we were blocked getting the lock. If this occurs the
351 * close might not have caught the lock.
353 if (checkfpclosed(p->p_fd, fd, fp)) {
354 dat->fc_flock.l_whence = SEEK_SET;
355 dat->fc_flock.l_start = 0;
356 dat->fc_flock.l_len = 0;
357 dat->fc_flock.l_type = F_UNLCK;
358 (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader,
359 F_UNLCK, &dat->fc_flock, F_POSIX);
361 break;
363 case F_GETLK:
364 if (fp->f_type != DTYPE_VNODE) {
365 error = EBADF;
366 break;
368 vp = (struct vnode *)fp->f_data;
370 * copyin/lockop may block
372 if (dat->fc_flock.l_type != F_RDLCK &&
373 dat->fc_flock.l_type != F_WRLCK &&
374 dat->fc_flock.l_type != F_UNLCK) {
375 error = EINVAL;
376 break;
378 if (dat->fc_flock.l_whence == SEEK_CUR)
379 dat->fc_flock.l_start += fp->f_offset;
380 error = VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_GETLK,
381 &dat->fc_flock, F_POSIX);
382 break;
383 default:
384 error = EINVAL;
385 break;
387 rel_mplock();
389 fdrop(fp);
390 return (error);
394 * The file control system call.
396 * MPSAFE
399 sys_fcntl(struct fcntl_args *uap)
401 union fcntl_dat dat;
402 int error;
404 switch (uap->cmd) {
405 case F_DUPFD:
406 dat.fc_fd = uap->arg;
407 break;
408 case F_SETFD:
409 dat.fc_cloexec = uap->arg;
410 break;
411 case F_SETFL:
412 dat.fc_flags = uap->arg;
413 break;
414 case F_SETOWN:
415 dat.fc_owner = uap->arg;
416 break;
417 case F_SETLKW:
418 case F_SETLK:
419 case F_GETLK:
420 error = copyin((caddr_t)uap->arg, &dat.fc_flock,
421 sizeof(struct flock));
422 if (error)
423 return (error);
424 break;
427 error = kern_fcntl(uap->fd, uap->cmd, &dat, curproc->p_ucred);
429 if (error == 0) {
430 switch (uap->cmd) {
431 case F_DUPFD:
432 uap->sysmsg_result = dat.fc_fd;
433 break;
434 case F_GETFD:
435 uap->sysmsg_result = dat.fc_cloexec;
436 break;
437 case F_GETFL:
438 uap->sysmsg_result = dat.fc_flags;
439 break;
440 case F_GETOWN:
441 uap->sysmsg_result = dat.fc_owner;
442 case F_GETLK:
443 error = copyout(&dat.fc_flock, (caddr_t)uap->arg,
444 sizeof(struct flock));
445 break;
449 return (error);
453 * Common code for dup, dup2, and fcntl(F_DUPFD).
455 * The type flag can be either DUP_FIXED or DUP_VARIABLE. DUP_FIXED tells
456 * kern_dup() to destructively dup over an existing file descriptor if new
457 * is already open. DUP_VARIABLE tells kern_dup() to find the lowest
458 * unused file descriptor that is greater than or equal to new.
460 * MPSAFE
463 kern_dup(enum dup_type type, int old, int new, int *res)
465 struct thread *td = curthread;
466 struct proc *p = td->td_proc;
467 struct filedesc *fdp = p->p_fd;
468 struct file *fp;
469 struct file *delfp;
470 int oldflags;
471 int holdleaders;
472 int error, newfd;
475 * Verify that we have a valid descriptor to dup from and
476 * possibly to dup to.
478 retry:
479 spin_lock_wr(&fdp->fd_spin);
480 if (new < 0 || new > p->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
481 new >= maxfilesperproc) {
482 spin_unlock_wr(&fdp->fd_spin);
483 return (EINVAL);
485 if ((unsigned)old >= fdp->fd_nfiles || fdp->fd_files[old].fp == NULL) {
486 spin_unlock_wr(&fdp->fd_spin);
487 return (EBADF);
489 if (type == DUP_FIXED && old == new) {
490 *res = new;
491 spin_unlock_wr(&fdp->fd_spin);
492 return (0);
494 fp = fdp->fd_files[old].fp;
495 oldflags = fdp->fd_files[old].fileflags;
496 fhold(fp); /* MPSAFE - can be called with a spinlock held */
499 * Allocate a new descriptor if DUP_VARIABLE, or expand the table
500 * if the requested descriptor is beyond the current table size.
502 * This can block. Retry if the source descriptor no longer matches
503 * or if our expectation in the expansion case races.
505 * If we are not expanding or allocating a new descriptor, then reset
506 * the target descriptor to a reserved state so we have a uniform
507 * setup for the next code block.
509 if (type == DUP_VARIABLE || new >= fdp->fd_nfiles) {
510 spin_unlock_wr(&fdp->fd_spin);
511 error = fdalloc(p, new, &newfd);
512 spin_lock_wr(&fdp->fd_spin);
513 if (error) {
514 spin_unlock_wr(&fdp->fd_spin);
515 fdrop(fp);
516 return (error);
519 * Check for ripout
521 if (old >= fdp->fd_nfiles || fdp->fd_files[old].fp != fp) {
522 fsetfd_locked(fdp, NULL, newfd);
523 spin_unlock_wr(&fdp->fd_spin);
524 fdrop(fp);
525 goto retry;
528 * Check for expansion race
530 if (type != DUP_VARIABLE && new != newfd) {
531 fsetfd_locked(fdp, NULL, newfd);
532 spin_unlock_wr(&fdp->fd_spin);
533 fdrop(fp);
534 goto retry;
537 * Check for ripout, newfd reused old (this case probably
538 * can't occur).
540 if (old == newfd) {
541 fsetfd_locked(fdp, NULL, newfd);
542 spin_unlock_wr(&fdp->fd_spin);
543 fdrop(fp);
544 goto retry;
546 new = newfd;
547 delfp = NULL;
548 } else {
549 if (fdp->fd_files[new].reserved) {
550 spin_unlock_wr(&fdp->fd_spin);
551 fdrop(fp);
552 kprintf("Warning: dup(): target descriptor %d is reserved, waiting for it to be resolved\n", new);
553 tsleep(fdp, 0, "fdres", hz);
554 goto retry;
558 * If the target descriptor was never allocated we have
559 * to allocate it. If it was we have to clean out the
560 * old descriptor. delfp inherits the ref from the
561 * descriptor table.
563 delfp = fdp->fd_files[new].fp;
564 fdp->fd_files[new].fp = NULL;
565 fdp->fd_files[new].reserved = 1;
566 if (delfp == NULL) {
567 fdreserve_locked(fdp, new, 1);
568 if (new > fdp->fd_lastfile)
569 fdp->fd_lastfile = new;
575 * NOTE: still holding an exclusive spinlock
579 * If a descriptor is being overwritten we may have to tell
580 * fdfree() to sleep to ensure that all relevant process
581 * leaders can be traversed in closef().
583 if (delfp != NULL && p->p_fdtol != NULL) {
584 fdp->fd_holdleaderscount++;
585 holdleaders = 1;
586 } else {
587 holdleaders = 0;
589 KASSERT(delfp == NULL || type == DUP_FIXED,
590 ("dup() picked an open file"));
593 * Duplicate the source descriptor, update lastfile. If the new
594 * descriptor was not allocated and we aren't replacing an existing
595 * descriptor we have to mark the descriptor as being in use.
597 * The fd_files[] array inherits fp's hold reference.
599 fsetfd_locked(fdp, fp, new);
600 fdp->fd_files[new].fileflags = oldflags & ~UF_EXCLOSE;
601 spin_unlock_wr(&fdp->fd_spin);
602 fdrop(fp);
603 *res = new;
606 * If we dup'd over a valid file, we now own the reference to it
607 * and must dispose of it using closef() semantics (as if a
608 * close() were performed on it).
610 if (delfp) {
611 closef(delfp, p);
612 if (holdleaders) {
613 spin_lock_wr(&fdp->fd_spin);
614 fdp->fd_holdleaderscount--;
615 if (fdp->fd_holdleaderscount == 0 &&
616 fdp->fd_holdleaderswakeup != 0) {
617 fdp->fd_holdleaderswakeup = 0;
618 spin_unlock_wr(&fdp->fd_spin);
619 wakeup(&fdp->fd_holdleaderscount);
620 } else {
621 spin_unlock_wr(&fdp->fd_spin);
625 return (0);
629 * If sigio is on the list associated with a process or process group,
630 * disable signalling from the device, remove sigio from the list and
631 * free sigio.
633 void
634 funsetown(struct sigio *sigio)
636 if (sigio == NULL)
637 return;
638 crit_enter();
639 *(sigio->sio_myref) = NULL;
640 crit_exit();
641 if (sigio->sio_pgid < 0) {
642 SLIST_REMOVE(&sigio->sio_pgrp->pg_sigiolst, sigio,
643 sigio, sio_pgsigio);
644 } else /* if ((*sigiop)->sio_pgid > 0) */ {
645 SLIST_REMOVE(&sigio->sio_proc->p_sigiolst, sigio,
646 sigio, sio_pgsigio);
648 crfree(sigio->sio_ucred);
649 kfree(sigio, M_SIGIO);
652 /* Free a list of sigio structures. */
653 void
654 funsetownlst(struct sigiolst *sigiolst)
656 struct sigio *sigio;
658 while ((sigio = SLIST_FIRST(sigiolst)) != NULL)
659 funsetown(sigio);
663 * This is common code for FIOSETOWN ioctl called by fcntl(fd, F_SETOWN, arg).
665 * After permission checking, add a sigio structure to the sigio list for
666 * the process or process group.
669 fsetown(pid_t pgid, struct sigio **sigiop)
671 struct proc *proc;
672 struct pgrp *pgrp;
673 struct sigio *sigio;
675 if (pgid == 0) {
676 funsetown(*sigiop);
677 return (0);
679 if (pgid > 0) {
680 proc = pfind(pgid);
681 if (proc == NULL)
682 return (ESRCH);
685 * Policy - Don't allow a process to FSETOWN a process
686 * in another session.
688 * Remove this test to allow maximum flexibility or
689 * restrict FSETOWN to the current process or process
690 * group for maximum safety.
692 if (proc->p_session != curproc->p_session)
693 return (EPERM);
695 pgrp = NULL;
696 } else /* if (pgid < 0) */ {
697 pgrp = pgfind(-pgid);
698 if (pgrp == NULL)
699 return (ESRCH);
702 * Policy - Don't allow a process to FSETOWN a process
703 * in another session.
705 * Remove this test to allow maximum flexibility or
706 * restrict FSETOWN to the current process or process
707 * group for maximum safety.
709 if (pgrp->pg_session != curproc->p_session)
710 return (EPERM);
712 proc = NULL;
714 funsetown(*sigiop);
715 sigio = kmalloc(sizeof(struct sigio), M_SIGIO, M_WAITOK);
716 if (pgid > 0) {
717 SLIST_INSERT_HEAD(&proc->p_sigiolst, sigio, sio_pgsigio);
718 sigio->sio_proc = proc;
719 } else {
720 SLIST_INSERT_HEAD(&pgrp->pg_sigiolst, sigio, sio_pgsigio);
721 sigio->sio_pgrp = pgrp;
723 sigio->sio_pgid = pgid;
724 sigio->sio_ucred = crhold(curproc->p_ucred);
725 /* It would be convenient if p_ruid was in ucred. */
726 sigio->sio_ruid = curproc->p_ucred->cr_ruid;
727 sigio->sio_myref = sigiop;
728 crit_enter();
729 *sigiop = sigio;
730 crit_exit();
731 return (0);
735 * This is common code for FIOGETOWN ioctl called by fcntl(fd, F_GETOWN, arg).
737 pid_t
738 fgetown(struct sigio *sigio)
740 return (sigio != NULL ? sigio->sio_pgid : 0);
744 * Close many file descriptors.
746 * MPSAFE
749 sys_closefrom(struct closefrom_args *uap)
751 return(kern_closefrom(uap->fd));
755 * Close all file descriptors greater than or equal to fd
757 * MPSAFE
760 kern_closefrom(int fd)
762 struct thread *td = curthread;
763 struct proc *p = td->td_proc;
764 struct filedesc *fdp;
766 KKASSERT(p);
767 fdp = p->p_fd;
769 if (fd < 0)
770 return (EINVAL);
773 * NOTE: This function will skip unassociated descriptors and
774 * reserved descriptors that have not yet been assigned.
775 * fd_lastfile can change as a side effect of kern_close().
777 spin_lock_wr(&fdp->fd_spin);
778 while (fd <= fdp->fd_lastfile) {
779 if (fdp->fd_files[fd].fp != NULL) {
780 spin_unlock_wr(&fdp->fd_spin);
781 /* ok if this races another close */
782 if (kern_close(fd) == EINTR)
783 return (EINTR);
784 spin_lock_wr(&fdp->fd_spin);
786 ++fd;
788 spin_unlock_wr(&fdp->fd_spin);
789 return (0);
793 * Close a file descriptor.
795 * MPSAFE
798 sys_close(struct close_args *uap)
800 return(kern_close(uap->fd));
804 * MPALMOSTSAFE - acquires mplock around knote_fdclose() calls
807 kern_close(int fd)
809 struct thread *td = curthread;
810 struct proc *p = td->td_proc;
811 struct filedesc *fdp;
812 struct file *fp;
813 int error;
814 int holdleaders;
816 KKASSERT(p);
817 fdp = p->p_fd;
819 spin_lock_wr(&fdp->fd_spin);
820 if ((fp = funsetfd_locked(fdp, fd)) == NULL) {
821 spin_unlock_wr(&fdp->fd_spin);
822 return (EBADF);
824 holdleaders = 0;
825 if (p->p_fdtol != NULL) {
827 * Ask fdfree() to sleep to ensure that all relevant
828 * process leaders can be traversed in closef().
830 fdp->fd_holdleaderscount++;
831 holdleaders = 1;
835 * we now hold the fp reference that used to be owned by the descriptor
836 * array.
838 spin_unlock_wr(&fdp->fd_spin);
839 if (fd < fdp->fd_knlistsize) {
840 get_mplock();
841 if (fd < fdp->fd_knlistsize)
842 knote_fdclose(p, fd);
843 rel_mplock();
845 error = closef(fp, p);
846 if (holdleaders) {
847 spin_lock_wr(&fdp->fd_spin);
848 fdp->fd_holdleaderscount--;
849 if (fdp->fd_holdleaderscount == 0 &&
850 fdp->fd_holdleaderswakeup != 0) {
851 fdp->fd_holdleaderswakeup = 0;
852 spin_unlock_wr(&fdp->fd_spin);
853 wakeup(&fdp->fd_holdleaderscount);
854 } else {
855 spin_unlock_wr(&fdp->fd_spin);
858 return (error);
862 * shutdown_args(int fd, int how)
865 kern_shutdown(int fd, int how)
867 struct thread *td = curthread;
868 struct proc *p = td->td_proc;
869 struct file *fp;
870 int error;
872 KKASSERT(p);
874 if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
875 return (EBADF);
876 error = fo_shutdown(fp, how);
877 fdrop(fp);
879 return (error);
883 sys_shutdown(struct shutdown_args *uap)
885 int error;
887 error = kern_shutdown(uap->s, uap->how);
889 return (error);
893 * MPSAFE
896 kern_fstat(int fd, struct stat *ub)
898 struct thread *td = curthread;
899 struct proc *p = td->td_proc;
900 struct file *fp;
901 int error;
903 KKASSERT(p);
905 if ((fp = holdfp(p->p_fd, fd, -1)) == NULL)
906 return (EBADF);
907 error = fo_stat(fp, ub, p->p_ucred);
908 fdrop(fp);
910 return (error);
914 * Return status information about a file descriptor.
916 * MPSAFE
919 sys_fstat(struct fstat_args *uap)
921 struct stat st;
922 int error;
924 error = kern_fstat(uap->fd, &st);
926 if (error == 0)
927 error = copyout(&st, uap->sb, sizeof(st));
928 return (error);
932 * Return pathconf information about a file descriptor.
934 /* ARGSUSED */
936 sys_fpathconf(struct fpathconf_args *uap)
938 struct thread *td = curthread;
939 struct proc *p = td->td_proc;
940 struct file *fp;
941 struct vnode *vp;
942 int error = 0;
944 KKASSERT(p);
946 if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL)
947 return (EBADF);
949 switch (fp->f_type) {
950 case DTYPE_PIPE:
951 case DTYPE_SOCKET:
952 if (uap->name != _PC_PIPE_BUF) {
953 error = EINVAL;
954 } else {
955 uap->sysmsg_result = PIPE_BUF;
956 error = 0;
958 break;
959 case DTYPE_FIFO:
960 case DTYPE_VNODE:
961 vp = (struct vnode *)fp->f_data;
962 error = VOP_PATHCONF(vp, uap->name, &uap->sysmsg_reg);
963 break;
964 default:
965 error = EOPNOTSUPP;
966 break;
968 fdrop(fp);
969 return(error);
972 static int fdexpand;
973 SYSCTL_INT(_debug, OID_AUTO, fdexpand, CTLFLAG_RD, &fdexpand, 0, "");
976 * Grow the file table so it can hold descriptors up through (want).
978 * The fdp's spinlock must be held exclusively on entry and may be held
979 * exclusively on return. The spinlock may be cycled by the routine.
981 * MPSAFE
983 static void
984 fdgrow_locked(struct filedesc *fdp, int want)
986 struct fdnode *newfiles;
987 struct fdnode *oldfiles;
988 int nf, extra;
990 nf = fdp->fd_nfiles;
991 do {
992 /* nf has to be of the form 2^n - 1 */
993 nf = 2 * nf + 1;
994 } while (nf <= want);
996 spin_unlock_wr(&fdp->fd_spin);
997 newfiles = kmalloc(nf * sizeof(struct fdnode), M_FILEDESC, M_WAITOK);
998 spin_lock_wr(&fdp->fd_spin);
1001 * We could have raced another extend while we were not holding
1002 * the spinlock.
1004 if (fdp->fd_nfiles >= nf) {
1005 spin_unlock_wr(&fdp->fd_spin);
1006 kfree(newfiles, M_FILEDESC);
1007 spin_lock_wr(&fdp->fd_spin);
1008 return;
1011 * Copy the existing fd_files[] array and zero the
1012 * new portion of the array.
1014 extra = nf - fdp->fd_nfiles;
1015 bcopy(fdp->fd_files, newfiles, fdp->fd_nfiles * sizeof(struct fdnode));
1016 bzero(&newfiles[fdp->fd_nfiles], extra * sizeof(struct fdnode));
1018 oldfiles = fdp->fd_files;
1019 fdp->fd_files = newfiles;
1020 fdp->fd_nfiles = nf;
1022 if (oldfiles != fdp->fd_builtin_files) {
1023 spin_unlock_wr(&fdp->fd_spin);
1024 kfree(oldfiles, M_FILEDESC);
1025 spin_lock_wr(&fdp->fd_spin);
1027 fdexpand++;
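/*
 * Illustrative example (editorial, not part of the original source):
 * with fd_nfiles == 15 and want == 40, the sizing loop above computes
 * 2*15+1 = 31 and then 2*31+1 = 63, so the table is reallocated to
 * hold 63 fdnodes, preserving the 2^n - 1 form.
 */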
1031 * Number of nodes in right subtree, including the root.
1033 static __inline int
1034 right_subtree_size(int n)
1036 return (n ^ (n | (n + 1)));
1040 * Bigger ancestor.
1042 static __inline int
1043 right_ancestor(int n)
1045 return (n | (n + 1));
1049 * Smaller ancestor.
1051 static __inline int
1052 left_ancestor(int n)
1054 return ((n & (n + 1)) - 1);
1058 * Traverse the in-place binary tree bottom-up adjusting the allocation
1059 * count so scans can determine where free descriptors are located.
1061 * MPSAFE - caller must be holding an exclusive spinlock on fdp
1063 static
1064 void
1065 fdreserve_locked(struct filedesc *fdp, int fd, int incr)
1067 while (fd >= 0) {
1068 fdp->fd_files[fd].allocated += incr;
1069 KKASSERT(fdp->fd_files[fd].allocated >= 0);
1070 fd = left_ancestor(fd);
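/*
 * Worked example of the in-place tree arithmetic (editorial, not part
 * of the original source).  For fd = 5 (binary 101):
 *
 *	right_ancestor(5)     = 5 | 6       = 7
 *	left_ancestor(5)      = (5 & 6) - 1 = 3
 *	right_subtree_size(5) = 5 ^ (5 | 6) = 2
 *
 * so fdreserve_locked(fdp, 5, 1) increments fd_files[5].allocated and
 * then fd_files[3].allocated; left_ancestor(3) is -1, ending the walk.
 */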
1075 * Reserve a file descriptor for the process. If no error occurs, the
1076 * caller MUST at some point call fsetfd() or assign a file pointer
1077 * or dispose of the reservation.
1079 * MPSAFE
1082 fdalloc(struct proc *p, int want, int *result)
1084 struct filedesc *fdp = p->p_fd;
1085 int fd, rsize, rsum, node, lim;
1087 spin_lock_rd(&p->p_limit->p_spin);
1088 lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
1089 spin_unlock_rd(&p->p_limit->p_spin);
1090 if (want >= lim)
1091 return (EMFILE);
1092 spin_lock_wr(&fdp->fd_spin);
1093 if (want >= fdp->fd_nfiles)
1094 fdgrow_locked(fdp, want);
1097 * Search for a free descriptor starting at the higher
1098 * of want or fd_freefile. If that fails, consider
1099 * expanding the fd_files array.
1101 * NOTE! the 'allocated' field is a cumulative recursive allocation
1102 * count. If we happen to see a value of 0 then we can shortcut
1103 * our search. Otherwise we run through the tree going
1104 * down branches we know have free descriptor(s) until we hit a
1105 * leaf node. The leaf node will be free but will not necessarily
1106 * have an allocated field of 0.
1108 retry:
1109 /* move up the tree looking for a subtree with a free node */
1110 for (fd = max(want, fdp->fd_freefile); fd < min(fdp->fd_nfiles, lim);
1111 fd = right_ancestor(fd)) {
1112 if (fdp->fd_files[fd].allocated == 0)
1113 goto found;
1115 rsize = right_subtree_size(fd);
1116 if (fdp->fd_files[fd].allocated == rsize)
1117 continue; /* right subtree full */
1120 * Free fd is in the right subtree of the tree rooted at fd.
1121 * Call that subtree R. Look for the smallest (leftmost)
1122 * subtree of R with an unallocated fd: continue moving
1123 * down the left branch until encountering a full left
1124 * subtree, then move to the right.
1126 for (rsum = 0, rsize /= 2; rsize > 0; rsize /= 2) {
1127 node = fd + rsize;
1128 rsum += fdp->fd_files[node].allocated;
1129 if (fdp->fd_files[fd].allocated == rsum + rsize) {
1130 fd = node; /* move to the right */
1131 if (fdp->fd_files[node].allocated == 0)
1132 goto found;
1133 rsum = 0;
1136 goto found;
1140 * No space in current array. Expand?
1142 if (fdp->fd_nfiles >= lim) {
1143 spin_unlock_wr(&fdp->fd_spin);
1144 return (EMFILE);
1146 fdgrow_locked(fdp, want);
1147 goto retry;
1149 found:
1150 KKASSERT(fd < fdp->fd_nfiles);
1151 if (fd > fdp->fd_lastfile)
1152 fdp->fd_lastfile = fd;
1153 if (want <= fdp->fd_freefile)
1154 fdp->fd_freefile = fd;
1155 *result = fd;
1156 KKASSERT(fdp->fd_files[fd].fp == NULL);
1157 KKASSERT(fdp->fd_files[fd].reserved == 0);
1158 fdp->fd_files[fd].fileflags = 0;
1159 fdp->fd_files[fd].reserved = 1;
1160 fdreserve_locked(fdp, fd, 1);
1161 spin_unlock_wr(&fdp->fd_spin);
1162 return (0);
1166 * Check to see whether n user file descriptors
1167 * are available to the process p.
1169 * MPSAFE
1172 fdavail(struct proc *p, int n)
1174 struct filedesc *fdp = p->p_fd;
1175 struct fdnode *fdnode;
1176 int i, lim, last;
1178 spin_lock_rd(&p->p_limit->p_spin);
1179 lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfilesperproc);
1180 spin_unlock_rd(&p->p_limit->p_spin);
1182 spin_lock_rd(&fdp->fd_spin);
1183 if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
1184 spin_unlock_rd(&fdp->fd_spin);
1185 return (1);
1187 last = min(fdp->fd_nfiles, lim);
1188 fdnode = &fdp->fd_files[fdp->fd_freefile];
1189 for (i = last - fdp->fd_freefile; --i >= 0; ++fdnode) {
1190 if (fdnode->fp == NULL && --n <= 0) {
1191 spin_unlock_rd(&fdp->fd_spin);
1192 return (1);
1195 spin_unlock_rd(&fdp->fd_spin);
1196 return (0);
1200 * Revoke open descriptors referencing (f_data, f_type)
1202 * Any revoke executed within a prison is only able to
1203 * revoke descriptors for processes within that prison.
1205 * Returns 0 on success or an error code.
1207 struct fdrevoke_info {
1208 void *data;
1209 short type;
1210 short unused;
1211 int count;
1212 int intransit;
1213 struct ucred *cred;
1214 struct file *nfp;
1217 static int fdrevoke_check_callback(struct file *fp, void *vinfo);
1218 static int fdrevoke_proc_callback(struct proc *p, void *vinfo);
1221 fdrevoke(void *f_data, short f_type, struct ucred *cred)
1223 struct fdrevoke_info info;
1224 int error;
1226 bzero(&info, sizeof(info));
1227 info.data = f_data;
1228 info.type = f_type;
1229 info.cred = cred;
1230 error = falloc(NULL, &info.nfp, NULL);
1231 if (error)
1232 return (error);
1235 * Scan the file pointer table once. dups do not dup file pointers,
1236 * only descriptors, so there is no leak. Set FREVOKED on the fps
1237 * being revoked.
1239 allfiles_scan_exclusive(fdrevoke_check_callback, &info);
1242 * If any fps were marked track down the related descriptors
1243 * and close them. Any dup()s at this point will notice
1244 * the FREVOKED already set in the fp and do the right thing.
1246 * Any fps with non-zero msgcounts (aka sent over a unix-domain
1247 * socket) bumped the intransit counter and will require a
1248 * scan. Races against fps leaving the socket are closed by
1249 * the socket code checking for FREVOKED.
1251 if (info.count)
1252 allproc_scan(fdrevoke_proc_callback, &info);
1253 if (info.intransit)
1254 unp_revoke_gc(info.nfp);
1255 fdrop(info.nfp);
1256 return(0);
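/*
 * Illustrative use (editorial note): revoking a terminal would reach
 * this function with something like fdrevoke(vp, DTYPE_VNODE, cred),
 * after which fdrevoke_proc_callback() below clears any controlling
 * terminal referencing that vnode and closes matching descriptors.
 */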
1260 * Locate matching file pointers directly.
1262 static int
1263 fdrevoke_check_callback(struct file *fp, void *vinfo)
1265 struct fdrevoke_info *info = vinfo;
1268 * File pointers already flagged for revocation are skipped.
1270 if (fp->f_flag & FREVOKED)
1271 return(0);
1274 * If revoking from a prison, file pointers created outside of
1275 * that prison, or file pointers without creds, cannot be revoked.
1277 if (info->cred->cr_prison &&
1278 (fp->f_cred == NULL ||
1279 info->cred->cr_prison != fp->f_cred->cr_prison)) {
1280 return(0);
1284 * If the file pointer matches then mark it for revocation. The
1285 * flag is currently only used by unp_revoke_gc().
1287 * info->count is a heuristic and can race in a SMP environment.
1289 if (info->data == fp->f_data && info->type == fp->f_type) {
1290 atomic_set_int(&fp->f_flag, FREVOKED);
1291 info->count += fp->f_count;
1292 if (fp->f_msgcount)
1293 ++info->intransit;
1295 return(0);
1299 * Locate matching file pointers via process descriptor tables.
1301 static int
1302 fdrevoke_proc_callback(struct proc *p, void *vinfo)
1304 struct fdrevoke_info *info = vinfo;
1305 struct filedesc *fdp;
1306 struct file *fp;
1307 int n;
1309 if (p->p_stat == SIDL || p->p_stat == SZOMB)
1310 return(0);
1311 if (info->cred->cr_prison &&
1312 info->cred->cr_prison != p->p_ucred->cr_prison) {
1313 return(0);
1317 * If the controlling terminal of the process matches the
1318 * vnode being revoked we clear the controlling terminal.
1320 * The normal spec_close() may not catch this because it
1321 * uses curproc instead of p.
1323 if (p->p_session && info->type == DTYPE_VNODE &&
1324 info->data == p->p_session->s_ttyvp) {
1325 p->p_session->s_ttyvp = NULL;
1326 vrele(info->data);
1330 * Softref the fdp to prevent it from being destroyed
1332 spin_lock_wr(&p->p_spin);
1333 if ((fdp = p->p_fd) == NULL) {
1334 spin_unlock_wr(&p->p_spin);
1335 return(0);
1337 atomic_add_int(&fdp->fd_softrefs, 1);
1338 spin_unlock_wr(&p->p_spin);
1341 * Locate and close any matching file descriptors.
1343 spin_lock_wr(&fdp->fd_spin);
1344 for (n = 0; n < fdp->fd_nfiles; ++n) {
1345 if ((fp = fdp->fd_files[n].fp) == NULL)
1346 continue;
1347 if (fp->f_flag & FREVOKED) {
1348 fhold(info->nfp);
1349 fdp->fd_files[n].fp = info->nfp;
1350 spin_unlock_wr(&fdp->fd_spin);
1351 closef(fp, p);
1352 spin_lock_wr(&fdp->fd_spin);
1353 --info->count;
1356 spin_unlock_wr(&fdp->fd_spin);
1357 atomic_subtract_int(&fdp->fd_softrefs, 1);
1358 return(0);
1362 * falloc:
1363 * Create a new open file structure and reserve a file descriptor
1364 * for the process that refers to it.
1366 * Root creds are checked using p, or assumed if p is NULL. If
1367 * resultfd is non-NULL then p must also be non-NULL. No file
1368 * descriptor is reserved if resultfd is NULL.
1370 * A file pointer with a refcount of 1 is returned. Note that the
1371 * file pointer is NOT associated with the descriptor. If falloc
1372 * returns success, fsetfd() MUST be called to either associate the
1373 * file pointer or clear the reservation.
1375 * MPSAFE
1378 falloc(struct proc *p, struct file **resultfp, int *resultfd)
1380 static struct timeval lastfail;
1381 static int curfail;
1382 struct file *fp;
1383 int error;
1385 fp = NULL;
1388 * Handle filetable full issues and root overfill.
1390 if (nfiles >= maxfiles - maxfilesrootres &&
1391 ((p && p->p_ucred->cr_ruid != 0) || nfiles >= maxfiles)) {
1392 if (ppsratecheck(&lastfail, &curfail, 1)) {
1393 kprintf("kern.maxfiles limit exceeded by uid %d, please see tuning(7).\n",
1394 (p ? p->p_ucred->cr_ruid : -1));
1396 error = ENFILE;
1397 goto done;
1401 * Allocate a new file structure.
1403 fp = kmalloc(sizeof(struct file), M_FILE, M_WAITOK | M_ZERO);
1404 spin_init(&fp->f_spin);
1405 fp->f_count = 1;
1406 fp->f_ops = &badfileops;
1407 fp->f_seqcount = 1;
1408 if (p)
1409 fp->f_cred = crhold(p->p_ucred);
1410 else
1411 fp->f_cred = crhold(proc0.p_ucred);
1412 spin_lock_wr(&filehead_spin);
1413 nfiles++;
1414 LIST_INSERT_HEAD(&filehead, fp, f_list);
1415 spin_unlock_wr(&filehead_spin);
1416 if (resultfd) {
1417 if ((error = fdalloc(p, 0, resultfd)) != 0) {
1418 fdrop(fp);
1419 fp = NULL;
1421 } else {
1422 error = 0;
1424 done:
1425 *resultfp = fp;
1426 return (error);
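/*
 * Typical falloc()/fsetfd() calling sequence, per the protocol described
 * above (an editorial sketch; some_setup() is a hypothetical setup step):
 *
 *	if ((error = falloc(p, &fp, &fd)) != 0)
 *		return (error);
 *	error = some_setup(fp);
 *	if (error == 0)
 *		fsetfd(p, fp, fd);		associate fp with the fd
 *	else
 *		fsetfd(p, NULL, fd);		return the reservation
 *	fdrop(fp);				drop falloc's initial ref
 *
 * fdcheckstd() later in this file follows essentially this pattern when
 * opening /dev/null for a missing stdin/stdout/stderr.
 */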
1430 * MPSAFE
1432 static
1434 checkfpclosed(struct filedesc *fdp, int fd, struct file *fp)
1436 int error;
1438 spin_lock_rd(&fdp->fd_spin);
1439 if ((unsigned) fd >= fdp->fd_nfiles || fp != fdp->fd_files[fd].fp)
1440 error = EBADF;
1441 else
1442 error = 0;
1443 spin_unlock_rd(&fdp->fd_spin);
1444 return (error);
1448 * Associate a file pointer with a previously reserved file descriptor.
1449 * This function always succeeds.
1451 * If fp is NULL, the file descriptor is returned to the pool.
1455 * MPSAFE (exclusive spinlock must be held on call)
1457 static void
1458 fsetfd_locked(struct filedesc *fdp, struct file *fp, int fd)
1460 KKASSERT((unsigned)fd < fdp->fd_nfiles);
1461 KKASSERT(fdp->fd_files[fd].reserved != 0);
1462 if (fp) {
1463 fhold(fp);
1464 fdp->fd_files[fd].fp = fp;
1465 fdp->fd_files[fd].reserved = 0;
1466 if (fp->f_type == DTYPE_KQUEUE) {
1467 if (fdp->fd_knlistsize < 0)
1468 fdp->fd_knlistsize = 0;
1470 } else {
1471 fdp->fd_files[fd].reserved = 0;
1472 fdreserve_locked(fdp, fd, -1);
1473 fdfixup_locked(fdp, fd);
1478 * MPSAFE
1480 void
1481 fsetfd(struct proc *p, struct file *fp, int fd)
1483 struct filedesc *fdp = p->p_fd;
1485 spin_lock_wr(&fdp->fd_spin);
1486 fsetfd_locked(fdp, fp, fd);
1487 spin_unlock_wr(&fdp->fd_spin);
1491 * MPSAFE (exclusive spinlock must be held on call)
1493 static
1494 struct file *
1495 funsetfd_locked(struct filedesc *fdp, int fd)
1497 struct file *fp;
1499 if ((unsigned)fd >= fdp->fd_nfiles)
1500 return (NULL);
1501 if ((fp = fdp->fd_files[fd].fp) == NULL)
1502 return (NULL);
1503 fdp->fd_files[fd].fp = NULL;
1504 fdp->fd_files[fd].fileflags = 0;
1506 fdreserve_locked(fdp, fd, -1);
1507 fdfixup_locked(fdp, fd);
1508 return(fp);
1512 * MPSAFE
1515 fgetfdflags(struct filedesc *fdp, int fd, int *flagsp)
1517 int error;
1519 spin_lock_rd(&fdp->fd_spin);
1520 if (((u_int)fd) >= fdp->fd_nfiles) {
1521 error = EBADF;
1522 } else if (fdp->fd_files[fd].fp == NULL) {
1523 error = EBADF;
1524 } else {
1525 *flagsp = fdp->fd_files[fd].fileflags;
1526 error = 0;
1528 spin_unlock_rd(&fdp->fd_spin);
1529 return (error);
1533 * MPSAFE
1536 fsetfdflags(struct filedesc *fdp, int fd, int add_flags)
1538 int error;
1540 spin_lock_wr(&fdp->fd_spin);
1541 if (((u_int)fd) >= fdp->fd_nfiles) {
1542 error = EBADF;
1543 } else if (fdp->fd_files[fd].fp == NULL) {
1544 error = EBADF;
1545 } else {
1546 fdp->fd_files[fd].fileflags |= add_flags;
1547 error = 0;
1549 spin_unlock_wr(&fdp->fd_spin);
1550 return (error);
1554 * MPSAFE
1557 fclrfdflags(struct filedesc *fdp, int fd, int rem_flags)
1559 int error;
1561 spin_lock_wr(&fdp->fd_spin);
1562 if (((u_int)fd) >= fdp->fd_nfiles) {
1563 error = EBADF;
1564 } else if (fdp->fd_files[fd].fp == NULL) {
1565 error = EBADF;
1566 } else {
1567 fdp->fd_files[fd].fileflags &= ~rem_flags;
1568 error = 0;
1570 spin_unlock_wr(&fdp->fd_spin);
1571 return (error);
1574 void
1575 fsetcred(struct file *fp, struct ucred *cr)
1577 crhold(cr);
1578 crfree(fp->f_cred);
1579 fp->f_cred = cr;
1583 * Free a file descriptor.
1585 static
1586 void
1587 ffree(struct file *fp)
1589 KASSERT((fp->f_count == 0), ("ffree: fp_fcount not 0!"));
1590 spin_lock_wr(&filehead_spin);
1591 LIST_REMOVE(fp, f_list);
1592 nfiles--;
1593 spin_unlock_wr(&filehead_spin);
1594 crfree(fp->f_cred);
1595 if (fp->f_nchandle.ncp)
1596 cache_drop(&fp->f_nchandle);
1597 kfree(fp, M_FILE);
1601 * called from init_main, initialize filedesc0 for proc0.
1603 void
1604 fdinit_bootstrap(struct proc *p0, struct filedesc *fdp0, int cmask)
1606 p0->p_fd = fdp0;
1607 p0->p_fdtol = NULL;
1608 fdp0->fd_refcnt = 1;
1609 fdp0->fd_cmask = cmask;
1610 fdp0->fd_files = fdp0->fd_builtin_files;
1611 fdp0->fd_nfiles = NDFILE;
1612 fdp0->fd_lastfile = -1;
1613 spin_init(&fdp0->fd_spin);
1617 * Build a new filedesc structure.
1619 * NOT MPSAFE (vref)
1621 struct filedesc *
1622 fdinit(struct proc *p)
1624 struct filedesc *newfdp;
1625 struct filedesc *fdp = p->p_fd;
1627 newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK|M_ZERO);
1628 spin_lock_rd(&fdp->fd_spin);
1629 if (fdp->fd_cdir) {
1630 newfdp->fd_cdir = fdp->fd_cdir;
1631 vref(newfdp->fd_cdir);
1632 cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
1636 * rdir may not be set in e.g. proc0 or anything vm_fork'd off of
1637 * proc0, but should unconditionally exist in other processes.
1639 if (fdp->fd_rdir) {
1640 newfdp->fd_rdir = fdp->fd_rdir;
1641 vref(newfdp->fd_rdir);
1642 cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
1644 if (fdp->fd_jdir) {
1645 newfdp->fd_jdir = fdp->fd_jdir;
1646 vref(newfdp->fd_jdir);
1647 cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
1649 spin_unlock_rd(&fdp->fd_spin);
1651 /* Create the file descriptor table. */
1652 newfdp->fd_refcnt = 1;
1653 newfdp->fd_cmask = cmask;
1654 newfdp->fd_files = newfdp->fd_builtin_files;
1655 newfdp->fd_nfiles = NDFILE;
1656 newfdp->fd_knlistsize = -1;
1657 newfdp->fd_lastfile = -1;
1658 spin_init(&newfdp->fd_spin);
1660 return (newfdp);
1664 * Share a filedesc structure.
1666 * MPSAFE
1668 struct filedesc *
1669 fdshare(struct proc *p)
1671 struct filedesc *fdp;
1673 fdp = p->p_fd;
1674 spin_lock_wr(&fdp->fd_spin);
1675 fdp->fd_refcnt++;
1676 spin_unlock_wr(&fdp->fd_spin);
1677 return (fdp);
1681 * Copy a filedesc structure.
1683 * MPSAFE
1685 struct filedesc *
1686 fdcopy(struct proc *p)
1688 struct filedesc *fdp = p->p_fd;
1689 struct filedesc *newfdp;
1690 struct fdnode *fdnode;
1691 int i;
1692 int ni;
1695 * Certain daemons might not have file descriptors.
1697 if (fdp == NULL)
1698 return (NULL);
1701 * Allocate the new filedesc and fd_files[] array. This can race
1702 * with operations by other threads on the fdp so we have to be
1703 * careful.
1705 newfdp = kmalloc(sizeof(struct filedesc), M_FILEDESC, M_WAITOK | M_ZERO);
1706 again:
1707 spin_lock_rd(&fdp->fd_spin);
1708 if (fdp->fd_lastfile < NDFILE) {
1709 newfdp->fd_files = newfdp->fd_builtin_files;
1710 i = NDFILE;
1711 } else {
1713 * We have to allocate (2^N - 1) entries for our in-place
1714 * binary tree. Allow the table to shrink.
1716 i = fdp->fd_nfiles;
1717 ni = (i - 1) / 2;
1718 while (ni > fdp->fd_lastfile && ni > NDFILE) {
1719 i = ni;
1720 ni = (i - 1) / 2;
1722 spin_unlock_rd(&fdp->fd_spin);
1723 newfdp->fd_files = kmalloc(i * sizeof(struct fdnode),
1724 M_FILEDESC, M_WAITOK | M_ZERO);
1727 * Check for race, retry
1729 spin_lock_rd(&fdp->fd_spin);
1730 if (i <= fdp->fd_lastfile) {
1731 spin_unlock_rd(&fdp->fd_spin);
1732 kfree(newfdp->fd_files, M_FILEDESC);
1733 goto again;
1738 * Dup the remaining fields. vref() and cache_hold() can be
1739 * safely called while holding the read spinlock on fdp.
1741 * The read spinlock on fdp is still being held.
1743 * NOTE: vref and cache_hold calls for the case where the vnode
1744 * or cache entry already has at least one ref may be called
1745 * while holding spin locks.
1747 if ((newfdp->fd_cdir = fdp->fd_cdir) != NULL) {
1748 vref(newfdp->fd_cdir);
1749 cache_copy(&fdp->fd_ncdir, &newfdp->fd_ncdir);
1752 * We must check for fd_rdir here, at least for now because
1753 * the init process is created before we have access to the
1754 * rootvnode to take a reference to it.
1756 if ((newfdp->fd_rdir = fdp->fd_rdir) != NULL) {
1757 vref(newfdp->fd_rdir);
1758 cache_copy(&fdp->fd_nrdir, &newfdp->fd_nrdir);
1760 if ((newfdp->fd_jdir = fdp->fd_jdir) != NULL) {
1761 vref(newfdp->fd_jdir);
1762 cache_copy(&fdp->fd_njdir, &newfdp->fd_njdir);
1764 newfdp->fd_refcnt = 1;
1765 newfdp->fd_nfiles = i;
1766 newfdp->fd_lastfile = fdp->fd_lastfile;
1767 newfdp->fd_freefile = fdp->fd_freefile;
1768 newfdp->fd_cmask = fdp->fd_cmask;
1769 newfdp->fd_knlist = NULL;
1770 newfdp->fd_knlistsize = -1;
1771 newfdp->fd_knhash = NULL;
1772 newfdp->fd_knhashmask = 0;
1773 spin_init(&newfdp->fd_spin);
1776 * Copy the descriptor table through (i). This also copies the
1777 * allocation state. Then go through and ref the file pointers
1778 * and clean up any KQ descriptors.
1780 * kq descriptors cannot be copied. Since we haven't ref'd the
1781 * copied files yet we can ignore the return value from funsetfd().
1783 * The read spinlock on fdp is still being held.
1785 bcopy(fdp->fd_files, newfdp->fd_files, i * sizeof(struct fdnode));
1786 for (i = 0 ; i < newfdp->fd_nfiles; ++i) {
1787 fdnode = &newfdp->fd_files[i];
1788 if (fdnode->reserved) {
1789 fdreserve_locked(newfdp, i, -1);
1790 fdnode->reserved = 0;
1791 fdfixup_locked(newfdp, i);
1792 } else if (fdnode->fp) {
1793 if (fdnode->fp->f_type == DTYPE_KQUEUE) {
1794 (void)funsetfd_locked(newfdp, i);
1795 } else {
1796 fhold(fdnode->fp);
1800 spin_unlock_rd(&fdp->fd_spin);
1801 return (newfdp);
1805 * Release a filedesc structure.
1807 * NOT MPSAFE (MPSAFE for refs > 1, but the final cleanup code is not MPSAFE)
1809 void
1810 fdfree(struct proc *p, struct filedesc *repl)
1812 struct filedesc *fdp;
1813 struct fdnode *fdnode;
1814 int i;
1815 struct filedesc_to_leader *fdtol;
1816 struct file *fp;
1817 struct vnode *vp;
1818 struct flock lf;
1821 * Certain daemons might not have file descriptors.
1823 fdp = p->p_fd;
1824 if (fdp == NULL) {
1825 p->p_fd = repl;
1826 return;
1830 * Severe messing around to follow.
1832 spin_lock_wr(&fdp->fd_spin);
1834 /* Check for special need to clear POSIX style locks */
1835 fdtol = p->p_fdtol;
1836 if (fdtol != NULL) {
1837 KASSERT(fdtol->fdl_refcount > 0,
1838 ("filedesc_to_refcount botch: fdl_refcount=%d",
1839 fdtol->fdl_refcount));
1840 if (fdtol->fdl_refcount == 1 &&
1841 (p->p_leader->p_flag & P_ADVLOCK) != 0) {
1842 for (i = 0; i <= fdp->fd_lastfile; ++i) {
1843 fdnode = &fdp->fd_files[i];
1844 if (fdnode->fp == NULL ||
1845 fdnode->fp->f_type != DTYPE_VNODE) {
1846 continue;
1848 fp = fdnode->fp;
1849 fhold(fp);
1850 spin_unlock_wr(&fdp->fd_spin);
1852 lf.l_whence = SEEK_SET;
1853 lf.l_start = 0;
1854 lf.l_len = 0;
1855 lf.l_type = F_UNLCK;
1856 vp = (struct vnode *)fp->f_data;
1857 (void) VOP_ADVLOCK(vp,
1858 (caddr_t)p->p_leader,
1859 F_UNLCK,
1860 &lf,
1861 F_POSIX);
1862 fdrop(fp);
1863 spin_lock_wr(&fdp->fd_spin);
1866 retry:
1867 if (fdtol->fdl_refcount == 1) {
1868 if (fdp->fd_holdleaderscount > 0 &&
1869 (p->p_leader->p_flag & P_ADVLOCK) != 0) {
1871 * close() or kern_dup() has cleared a reference
1872 * in a shared file descriptor table.
1874 fdp->fd_holdleaderswakeup = 1;
1875 ssleep(&fdp->fd_holdleaderscount,
1876 &fdp->fd_spin, 0, "fdlhold", 0);
1877 goto retry;
1879 if (fdtol->fdl_holdcount > 0) {
1881 * Ensure that fdtol->fdl_leader
1882 * remains valid in closef().
1884 fdtol->fdl_wakeup = 1;
1885 ssleep(fdtol, &fdp->fd_spin, 0, "fdlhold", 0);
1886 goto retry;
1889 fdtol->fdl_refcount--;
1890 if (fdtol->fdl_refcount == 0 &&
1891 fdtol->fdl_holdcount == 0) {
1892 fdtol->fdl_next->fdl_prev = fdtol->fdl_prev;
1893 fdtol->fdl_prev->fdl_next = fdtol->fdl_next;
1894 } else {
1895 fdtol = NULL;
1897 p->p_fdtol = NULL;
1898 if (fdtol != NULL) {
1899 spin_unlock_wr(&fdp->fd_spin);
1900 kfree(fdtol, M_FILEDESC_TO_LEADER);
1901 spin_lock_wr(&fdp->fd_spin);
1904 if (--fdp->fd_refcnt > 0) {
1905 spin_unlock_wr(&fdp->fd_spin);
1906 spin_lock_wr(&p->p_spin);
1907 p->p_fd = repl;
1908 spin_unlock_wr(&p->p_spin);
1909 return;
1913 * Even though we are the last reference to the structure, allproc
1914 * scans may still reference the structure. Maintain proper
1915 * locks until we can replace p->p_fd.
1917 * Also note that kqueue's closef still needs to reference the
1918 * fdp via p->p_fd, so we have to close the descriptors before
1919 * we replace p->p_fd.
1921 for (i = 0; i <= fdp->fd_lastfile; ++i) {
1922 if (fdp->fd_files[i].fp) {
1923 fp = funsetfd_locked(fdp, i);
1924 if (fp) {
1925 spin_unlock_wr(&fdp->fd_spin);
1926 closef(fp, p);
1927 spin_lock_wr(&fdp->fd_spin);
1931 spin_unlock_wr(&fdp->fd_spin);
1934 * Interlock against allproc scan operations (typically frevoke).
1936 spin_lock_wr(&p->p_spin);
1937 p->p_fd = repl;
1938 spin_unlock_wr(&p->p_spin);
1941 * Wait for any softrefs to go away. This race rarely occurs so
1942 * we can use a non-critical-path style poll/sleep loop. The
1943 * race only occurs against allproc scans.
1945 * No new softrefs can occur with the fdp disconnected from the
1946 * process.
1948 if (fdp->fd_softrefs) {
1949 kprintf("pid %d: Warning, fdp race avoided\n", p->p_pid);
1950 while (fdp->fd_softrefs)
1951 tsleep(&fdp->fd_softrefs, 0, "fdsoft", 1);
1954 if (fdp->fd_files != fdp->fd_builtin_files)
1955 kfree(fdp->fd_files, M_FILEDESC);
1956 if (fdp->fd_cdir) {
1957 cache_drop(&fdp->fd_ncdir);
1958 vrele(fdp->fd_cdir);
1960 if (fdp->fd_rdir) {
1961 cache_drop(&fdp->fd_nrdir);
1962 vrele(fdp->fd_rdir);
1964 if (fdp->fd_jdir) {
1965 cache_drop(&fdp->fd_njdir);
1966 vrele(fdp->fd_jdir);
1968 if (fdp->fd_knlist)
1969 kfree(fdp->fd_knlist, M_KQUEUE);
1970 if (fdp->fd_knhash)
1971 kfree(fdp->fd_knhash, M_KQUEUE);
1972 kfree(fdp, M_FILEDESC);
1976 * Retrieve and reference the file pointer associated with a descriptor.
1978 * MPSAFE
1980 struct file *
1981 holdfp(struct filedesc *fdp, int fd, int flag)
1983 struct file* fp;
1985 spin_lock_rd(&fdp->fd_spin);
1986 if (((u_int)fd) >= fdp->fd_nfiles) {
1987 fp = NULL;
1988 goto done;
1990 if ((fp = fdp->fd_files[fd].fp) == NULL)
1991 goto done;
1992 if ((fp->f_flag & flag) == 0 && flag != -1) {
1993 fp = NULL;
1994 goto done;
1996 fhold(fp);
1997 done:
1998 spin_unlock_rd(&fdp->fd_spin);
1999 return (fp);
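/*
 * Editorial note: the 'flag' argument is an access check.  For example,
 * holdfp(p->p_fd, fd, FREAD) returns NULL unless the file is open for
 * reading, while callers such as kern_fstat() pass -1 to accept any
 * open file.
 */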
2003 * holdsock() - load the struct file pointer associated
2004 * with a socket into *fpp. If an error occurs, non-zero
2005 * will be returned and *fpp will be set to NULL.
2007 * MPSAFE
2010 holdsock(struct filedesc *fdp, int fd, struct file **fpp)
2012 struct file *fp;
2013 int error;
2015 spin_lock_rd(&fdp->fd_spin);
2016 if ((unsigned)fd >= fdp->fd_nfiles) {
2017 error = EBADF;
2018 fp = NULL;
2019 goto done;
2021 if ((fp = fdp->fd_files[fd].fp) == NULL) {
2022 error = EBADF;
2023 goto done;
2025 if (fp->f_type != DTYPE_SOCKET) {
2026 error = ENOTSOCK;
2027 goto done;
2029 fhold(fp);
2030 error = 0;
2031 done:
2032 spin_unlock_rd(&fdp->fd_spin);
2033 *fpp = fp;
2034 return (error);
2038 * Convert a user file descriptor to a held file pointer.
2040 * MPSAFE
2043 holdvnode(struct filedesc *fdp, int fd, struct file **fpp)
2045 struct file *fp;
2046 int error;
2048 spin_lock_rd(&fdp->fd_spin);
2049 if ((unsigned)fd >= fdp->fd_nfiles) {
2050 error = EBADF;
2051 fp = NULL;
2052 goto done;
2054 if ((fp = fdp->fd_files[fd].fp) == NULL) {
2055 error = EBADF;
2056 goto done;
2058 if (fp->f_type != DTYPE_VNODE && fp->f_type != DTYPE_FIFO) {
2059 fp = NULL;
2060 error = EINVAL;
2061 goto done;
2063 fhold(fp);
2064 error = 0;
2065 done:
2066 spin_unlock_rd(&fdp->fd_spin);
2067 *fpp = fp;
2068 return (error);
2072 * For setugid programs, we don't want people to use that setugidness
2073 * to generate error messages which write to a file which would
2074 * otherwise be off-limits to the process.
2076 * This is a gross hack to plug the hole. A better solution would involve
2077 * a special vop or other form of generalized access control mechanism. We
2078 * go ahead and just reject all procfs filesystem accesses as dangerous.
2080 * Since setugidsafety calls this only for fd 0, 1 and 2, this check is
2081 * sufficient. We also don't check for setugidness since we know we are.
2083 static int
2084 is_unsafe(struct file *fp)
2086 if (fp->f_type == DTYPE_VNODE &&
2087 ((struct vnode *)(fp->f_data))->v_tag == VT_PROCFS)
2088 return (1);
2089 return (0);
2093 * Make this setugid thing safe, if at all possible.
2095 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
2097 void
2098 setugidsafety(struct proc *p)
2100 struct filedesc *fdp = p->p_fd;
2101 int i;
2103 /* Certain daemons might not have file descriptors. */
2104 if (fdp == NULL)
2105 return;
2108 * note: fdp->fd_files may be reallocated out from under us while
2109 * we are blocked in a close. Be careful!
2111 for (i = 0; i <= fdp->fd_lastfile; i++) {
2112 if (i > 2)
2113 break;
2114 if (fdp->fd_files[i].fp && is_unsafe(fdp->fd_files[i].fp)) {
2115 struct file *fp;
2117 if (i < fdp->fd_knlistsize)
2118 knote_fdclose(p, i);
2120 * NULL-out descriptor prior to close to avoid
2121 * a race while close blocks.
2123 if ((fp = funsetfd_locked(fdp, i)) != NULL)
2124 closef(fp, p);
2130 * Close any files on exec?
2132 * NOT MPSAFE - scans fdp without spinlocks, calls knote_fdclose()
2134 void
2135 fdcloseexec(struct proc *p)
2137 struct filedesc *fdp = p->p_fd;
2138 int i;
2140 /* Certain daemons might not have file descriptors. */
2141 if (fdp == NULL)
2142 return;
2145 * We cannot cache fd_files since operations may block and rip
2146 * them out from under us.
2148 for (i = 0; i <= fdp->fd_lastfile; i++) {
2149 if (fdp->fd_files[i].fp != NULL &&
2150 (fdp->fd_files[i].fileflags & UF_EXCLOSE)) {
2151 struct file *fp;
2153 if (i < fdp->fd_knlistsize)
2154 knote_fdclose(p, i);
2156 * NULL-out descriptor prior to close to avoid
2157 * a race while close blocks.
2159 if ((fp = funsetfd_locked(fdp, i)) != NULL)
2160 closef(fp, p);
2166 * It is unsafe for set[ug]id processes to be started with file
2167 * descriptors 0..2 closed, as these descriptors are given implicit
2168 * significance in the Standard C library. fdcheckstd() will create a
2169 * descriptor referencing /dev/null for each of stdin, stdout, and
2170 * stderr that is not already open.
2172 * NOT MPSAFE - calls falloc, vn_open, etc
2175 fdcheckstd(struct proc *p)
2177 struct nlookupdata nd;
2178 struct filedesc *fdp;
2179 struct file *fp;
2180 int retval;
2181 int i, error, flags, devnull;
2183 fdp = p->p_fd;
2184 if (fdp == NULL)
2185 return (0);
2186 devnull = -1;
2187 error = 0;
2188 for (i = 0; i < 3; i++) {
2189 if (fdp->fd_files[i].fp != NULL)
2190 continue;
2191 if (devnull < 0) {
2192 if ((error = falloc(p, &fp, &devnull)) != 0)
2193 break;
2195 error = nlookup_init(&nd, "/dev/null", UIO_SYSSPACE,
2196 NLC_FOLLOW|NLC_LOCKVP);
2197 flags = FREAD | FWRITE;
2198 if (error == 0)
2199 error = vn_open(&nd, fp, flags, 0);
2200 if (error == 0)
2201 fsetfd(p, fp, devnull);
2202 else
2203 fsetfd(p, NULL, devnull);
2204 fdrop(fp);
2205 nlookup_done(&nd);
2206 if (error)
2207 break;
2208 KKASSERT(i == devnull);
2209 } else {
2210 error = kern_dup(DUP_FIXED, devnull, i, &retval);
2211 if (error != 0)
2212 break;
2215 return (error);
2219 * Internal form of close.
2220 * Decrement reference count on file structure.
2221 * Note: td and/or p may be NULL when closing a file
2222 * that was being passed in a message.
2224 * MPALMOSTSAFE - acquires mplock for VOP operations
2227 closef(struct file *fp, struct proc *p)
2229 struct vnode *vp;
2230 struct flock lf;
2231 struct filedesc_to_leader *fdtol;
2233 if (fp == NULL)
2234 return (0);
2237 * POSIX record locking dictates that any close releases ALL
2238 * locks owned by this process. This is handled by setting
2239 * a flag in the unlock to free ONLY locks obeying POSIX
2240 * semantics, and not to free BSD-style file locks.
2241 * If the descriptor was in a message, POSIX-style locks
2242 * aren't passed with the descriptor.
2244 if (p != NULL && fp->f_type == DTYPE_VNODE &&
2245 (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
2247 get_mplock();
2248 if ((p->p_leader->p_flag & P_ADVLOCK) != 0) {
2249 lf.l_whence = SEEK_SET;
2250 lf.l_start = 0;
2251 lf.l_len = 0;
2252 lf.l_type = F_UNLCK;
2253 vp = (struct vnode *)fp->f_data;
2254 (void) VOP_ADVLOCK(vp, (caddr_t)p->p_leader, F_UNLCK,
2255 &lf, F_POSIX);
2257 fdtol = p->p_fdtol;
2258 if (fdtol != NULL) {
2260 * Handle special case where file descriptor table
2261 * is shared between multiple process leaders.
2263 for (fdtol = fdtol->fdl_next;
2264 fdtol != p->p_fdtol;
2265 fdtol = fdtol->fdl_next) {
2266 if ((fdtol->fdl_leader->p_flag &
2267 P_ADVLOCK) == 0)
2268 continue;
2269 fdtol->fdl_holdcount++;
2270 lf.l_whence = SEEK_SET;
2271 lf.l_start = 0;
2272 lf.l_len = 0;
2273 lf.l_type = F_UNLCK;
2274 vp = (struct vnode *)fp->f_data;
2275 (void) VOP_ADVLOCK(vp,
2276 (caddr_t)fdtol->fdl_leader,
2277 F_UNLCK, &lf, F_POSIX);
2278 fdtol->fdl_holdcount--;
2279 if (fdtol->fdl_holdcount == 0 &&
2280 fdtol->fdl_wakeup != 0) {
2281 fdtol->fdl_wakeup = 0;
2282 wakeup(fdtol);
2286 rel_mplock();
2288 return (fdrop(fp));
2292 * MPSAFE
2294 * fhold() can only be called if f_count is already at least 1 (i.e. the
2295 * caller of fhold() already has a reference to the file pointer in some
2296 * manner or other).
2298 * f_count is not spin-locked. Instead, atomic ops are used for
2299 * incrementing, decrementing, and handling the 1->0 transition.
2301 void
2302 fhold(struct file *fp)
2304 atomic_add_int(&fp->f_count, 1);
2308 * fdrop() - drop a reference to a file pointer
2310 * MPALMOSTSAFE - acquires mplock for final close sequence
2313 fdrop(struct file *fp)
2315 struct flock lf;
2316 struct vnode *vp;
2317 int error;
2319 /*
2320 * A combined fetch and subtract is needed to properly detect
2321 * 1->0 transitions, otherwise two cpus dropping from a ref
2322 * count of 2 might both try to run the 1->0 code.
2323 */
2324 if (atomic_fetchadd_int(&fp->f_count, -1) > 1)
2325 return (0);
2327 get_mplock();
2329 /*
2330 * The last reference has gone away, we own the fp structure free
2331 * and clear.
2332 */
2333 if (fp->f_count < 0)
2334 panic("fdrop: count < 0");
2335 if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE &&
2336 (((struct vnode *)fp->f_data)->v_flag & VMAYHAVELOCKS)
2337 ) {
2338 lf.l_whence = SEEK_SET;
2339 lf.l_start = 0;
2340 lf.l_len = 0;
2341 lf.l_type = F_UNLCK;
2342 vp = (struct vnode *)fp->f_data;
2343 (void) VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
2344 }
2345 if (fp->f_ops != &badfileops)
2346 error = fo_close(fp);
2347 else
2348 error = 0;
2349 ffree(fp);
2350 rel_mplock();
2351 return (error);
2352 }
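/*
 * Editor's illustrative sketch (not part of the original source): what the
 * "combined fetch and subtract" comment above guards against.  A separate
 * decrement and test is not atomic as a whole, so two cpus dropping the
 * same file from f_count == 2 could both see zero and both run the final
 * close:
 *
 *	// broken pattern, shown only for contrast
 *	atomic_subtract_int(&fp->f_count, 1);	// cpu A and cpu B both run this
 *	if (fp->f_count == 0)			// both may now read 0
 *		...				// teardown runs twice
 *
 * atomic_fetchadd_int() returns the value before the subtraction, so
 * exactly one cpu observes the 1->0 transition and owns the teardown.
 */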
2354 /*
2355 * Apply an advisory lock on a file descriptor.
2357 * Just attempt to get a record lock of the requested type on
2358 * the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
2359 */
2360 int
2361 sys_flock(struct flock_args *uap)
2362 {
2363 struct proc *p = curproc;
2364 struct file *fp;
2365 struct vnode *vp;
2366 struct flock lf;
2367 int error;
2369 if ((fp = holdfp(p->p_fd, uap->fd, -1)) == NULL)
2370 return (EBADF);
2371 if (fp->f_type != DTYPE_VNODE) {
2372 error = EOPNOTSUPP;
2373 goto done;
2374 }
2375 vp = (struct vnode *)fp->f_data;
2376 lf.l_whence = SEEK_SET;
2377 lf.l_start = 0;
2378 lf.l_len = 0;
2379 if (uap->how & LOCK_UN) {
2380 lf.l_type = F_UNLCK;
2381 fp->f_flag &= ~FHASLOCK;
2382 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_UNLCK, &lf, 0);
2383 goto done;
2384 }
2385 if (uap->how & LOCK_EX)
2386 lf.l_type = F_WRLCK;
2387 else if (uap->how & LOCK_SH)
2388 lf.l_type = F_RDLCK;
2389 else {
2390 error = EBADF;
2391 goto done;
2392 }
2393 fp->f_flag |= FHASLOCK;
2394 if (uap->how & LOCK_NB)
2395 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, 0);
2396 else
2397 error = VOP_ADVLOCK(vp, (caddr_t)fp, F_SETLK, &lf, F_WAIT);
2398 done:
2399 fdrop(fp);
2400 return (error);
2401 }
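/*
 * Editor's illustrative sketch (not part of the original source): this is
 * the system call behind flock(2).  A typical non-blocking exclusive lock
 * from userland:
 *
 *	// hypothetical userland usage; assumes <sys/file.h>, <fcntl.h>, <err.h>
 *	int fd = open("/var/run/example.pid", O_RDWR | O_CREAT, 0644);
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1)
 *		err(1, "another instance is running");
 *	// ... do work ...
 *	flock(fd, LOCK_UN);
 *
 * Because the lock is recorded against the struct file (FHASLOCK) rather
 * than the process, it is inherited across fork() and shared by dup()ed
 * descriptors, unlike the POSIX fcntl() locks released in closef().
 */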
2403 /*
2404 * File Descriptor pseudo-device driver (/dev/fd/).
2406 * Opening minor device N dup()s the file (if any) connected to file
2407 * descriptor N belonging to the calling process. Note that this driver
2408 * consists of only the ``open()'' routine, because all subsequent
2409 * references to this file will be direct to the other driver.
2410 */
2411 /* ARGSUSED */
2412 static int
2413 fdopen(struct dev_open_args *ap)
2414 {
2415 thread_t td = curthread;
2417 KKASSERT(td->td_lwp != NULL);
2419 /*
2420 * XXX Kludge: set curlwp->lwp_dupfd to contain the value of the
2421 * file descriptor being sought for duplication. The error
2422 * return ensures that the vnode for this device will be released
2423 * by vn_open. Open will detect this special error and take the
2424 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
2425 * will simply report the error.
2426 */
2427 td->td_lwp->lwp_dupfd = minor(ap->a_head.a_dev);
2428 return (ENODEV);
2429 }
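/*
 * Editor's illustrative sketch (not part of the original source): the net
 * effect of the ENODEV dance above, finished off by dupfdopen() below, is
 * that opening /dev/fd/N behaves like dup(N), subject to the mode check:
 *
 *	// hypothetical userland demonstration; assumes <stdio.h>, <fcntl.h>
 *	int a = open("/etc/motd", O_RDONLY);
 *	char path[32];
 *	snprintf(path, sizeof(path), "/dev/fd/%d", a);
 *	int b = open(path, O_RDONLY);	// roughly equivalent to dup(a)
 *	int c = open(path, O_RDWR);	// fails with EACCES; a is read-only
 *
 * This is what lets shells hand an already-open descriptor to commands
 * that insist on being given a pathname.
 */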
2431 /*
2432 * The caller has reserved the file descriptor dfd for us. On success we
2433 * must fsetfd() it. On failure the caller will clean it up.
2435 * NOT MPSAFE - isn't getting spinlocks, possibly other things
2436 */
2437 int
2438 dupfdopen(struct proc *p, int dfd, int sfd, int mode, int error)
2439 {
2440 struct filedesc *fdp = p->p_fd;
2441 struct file *wfp;
2442 struct file *xfp;
2443 int werror;
2445 if ((wfp = holdfp(fdp, sfd, -1)) == NULL)
2446 return (EBADF);
2448 /*
2449 * Close a revoke/dup race. Duping a descriptor marked as revoked
2450 * will dup a dummy descriptor instead of the real one.
2451 */
2452 if (wfp->f_flag & FREVOKED) {
2453 kprintf("Warning: attempt to dup() a revoked descriptor\n");
2454 fdrop(wfp);
2455 wfp = NULL;
2456 werror = falloc(NULL, &wfp, NULL);
2457 if (werror)
2458 return (werror);
2459 }
2461 /*
2462 * There are two cases of interest here.
2464 * For ENODEV simply dup sfd to file descriptor dfd and return.
2466 * For ENXIO steal away the file structure from sfd and store it in
2467 * dfd. sfd is effectively closed by this operation.
2469 * Any other error code is just returned.
2470 */
2471 switch (error) {
2472 case ENODEV:
2473 /*
2474 * Check that the mode the file is being opened for is a
2475 * subset of the mode of the existing descriptor.
2476 */
2477 if (((mode & (FREAD|FWRITE)) | wfp->f_flag) != wfp->f_flag) {
2478 error = EACCES;
2479 break;
2480 }
2481 fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
2482 fsetfd(p, wfp, dfd);
2483 error = 0;
2484 break;
2485 case ENXIO:
2486 /*
2487 * Steal away the file pointer from sfd and stuff it into dfd.
2488 */
2489 fdp->fd_files[dfd].fileflags = fdp->fd_files[sfd].fileflags;
2490 fsetfd(p, wfp, dfd);
2491 if ((xfp = funsetfd_locked(fdp, sfd)) != NULL)
2492 fdrop(xfp);
2493 error = 0;
2494 break;
2495 default:
2496 break;
2497 }
2498 fdrop(wfp);
2499 return (error);
2500 }
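/*
 * Editor's illustrative sketch (not part of the original source): how this
 * function is reached.  fdopen() above never opens anything itself; it
 * records the minor number in lwp_dupfd and fails with ENODEV, and the
 * open path then calls dupfdopen() on the descriptor it had already
 * reserved.  Schematically (the real logic lives in the vfs open code and
 * is simplified here; "indx" stands for the reserved descriptor):
 *
 *	error = vn_open(&nd, fp, flags, cmode);
 *	if (error == ENODEV || error == ENXIO) {
 *		if (lp->lwp_dupfd >= 0)
 *			error = dupfdopen(p, indx, lp->lwp_dupfd, flags, error);
 *	}
 *
 * ENODEV then means "dup the existing descriptor onto dfd"; ENXIO means
 * "move it", leaving sfd effectively closed, as described above.
 */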
2502 /*
2503 * NOT MPSAFE - I think these refer to a common file descriptor table
2504 * and we need to spinlock that to link fdtol in.
2505 */
2506 struct filedesc_to_leader *
2507 filedesc_to_leader_alloc(struct filedesc_to_leader *old,
2508 struct proc *leader)
2509 {
2510 struct filedesc_to_leader *fdtol;
2512 fdtol = kmalloc(sizeof(struct filedesc_to_leader),
2513 M_FILEDESC_TO_LEADER, M_WAITOK);
2514 fdtol->fdl_refcount = 1;
2515 fdtol->fdl_holdcount = 0;
2516 fdtol->fdl_wakeup = 0;
2517 fdtol->fdl_leader = leader;
2518 if (old != NULL) {
2519 fdtol->fdl_next = old->fdl_next;
2520 fdtol->fdl_prev = old;
2521 old->fdl_next = fdtol;
2522 fdtol->fdl_next->fdl_prev = fdtol;
2523 } else {
2524 fdtol->fdl_next = fdtol;
2525 fdtol->fdl_prev = fdtol;
2526 }
2527 return fdtol;
2528 }
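/*
 * Editor's illustrative note (not part of the original source): fdl_next
 * and fdl_prev form a circular doubly-linked ring of all leaders sharing
 * a descriptor table; a lone node simply points at itself.  After an
 * insertion following the existing node "old", the following should hold
 * (schematic only):
 *
 *	KKASSERT(old->fdl_next == fdtol && fdtol->fdl_prev == old);
 *	KKASSERT(fdtol->fdl_next->fdl_prev == fdtol);
 *
 * closef() walks this ring (following fdl_next until it returns to
 * p->p_fdtol) to release POSIX locks on behalf of every leader.
 */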
2530 /*
2531 * Scan all file pointers in the system. The callback is made with
2532 * the master list spinlock held exclusively.
2534 * MPSAFE
2535 */
2536 void
2537 allfiles_scan_exclusive(int (*callback)(struct file *, void *), void *data)
2538 {
2539 struct file *fp;
2540 int res;
2542 spin_lock_wr(&filehead_spin);
2543 LIST_FOREACH(fp, &filehead, f_list) {
2544 res = callback(fp, data);
2545 if (res < 0)
2546 break;
2547 }
2548 spin_unlock_wr(&filehead_spin);
2549 }
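/*
 * Editor's illustrative sketch (not part of the original source): a minimal
 * callback for the scanner above.  It runs with the filehead spinlock held
 * exclusively, so it must not block; returning a negative value stops the
 * scan early.  The function name is hypothetical.
 *
 *	static int
 *	count_vnode_files_callback(struct file *fp, void *data)
 *	{
 *		int *countp = data;
 *
 *		if (fp->f_type == DTYPE_VNODE)
 *			++*countp;
 *		return (0);		// keep scanning
 *	}
 *
 *	// usage:  int count = 0;
 *	//         allfiles_scan_exclusive(count_vnode_files_callback, &count);
 */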
2551 /*
2552 * Get file structures.
2554 * NOT MPSAFE - process list scan, SYSCTL_OUT (probably not mpsafe)
2555 */
2557 struct sysctl_kern_file_info {
2558 int count;
2559 int error;
2560 struct sysctl_req *req;
2561 };
2563 static int sysctl_kern_file_callback(struct proc *p, void *data);
2565 static int
2566 sysctl_kern_file(SYSCTL_HANDLER_ARGS)
2567 {
2568 struct sysctl_kern_file_info info;
2570 /*
2571 * Note: because the number of file descriptors is calculated
2572 * in different ways for sizing vs returning the data,
2573 * there is information leakage from the first loop. However,
2574 * it is of a similar order of magnitude to the leakage from
2575 * global system statistics such as kern.openfiles.
2577 * When just doing a count, note that we cannot just count
2578 * the elements and add f_count via the filehead list because
2579 * threaded processes share their descriptor table and f_count might
2580 * still be '1' in that case.
2582 * Since the SYSCTL op can block, we must hold the process to
2583 * prevent it being ripped out from under us either in the
2584 * file descriptor loop or in the greater LIST_FOREACH. The
2585 * process may be in varying states of disrepair. If the process
2586 * is in SZOMB we may have caught it just as it is being removed
2587 * from the allproc list, we must skip it in that case to maintain
2588 * an unbroken chain through the allproc list.
2589 */
2590 info.count = 0;
2591 info.error = 0;
2592 info.req = req;
2593 allproc_scan(sysctl_kern_file_callback, &info);
2595 /*
2596 * When just calculating the size, overestimate a bit to try to
2597 * prevent system activity from causing the buffer-fill call
2598 * to fail later on.
2599 */
2600 if (req->oldptr == NULL) {
2601 info.count = (info.count + 16) + (info.count / 10);
2602 info.error = SYSCTL_OUT(req, NULL,
2603 info.count * sizeof(struct kinfo_file));
2604 }
2605 return (info.error);
2606 }
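/*
 * Editor's illustrative sketch (not part of the original source): userland
 * consumes this handler through the kern.file sysctl as an array of struct
 * kinfo_file, using the usual two-pass size-then-fetch protocol:
 *
 *	// hypothetical userland usage; assumes <sys/sysctl.h>, <sys/kinfo.h>,
 *	// <stdlib.h>
 *	int mib[2] = { CTL_KERN, KERN_FILE };
 *	size_t len = 0;
 *	sysctl(mib, 2, NULL, &len, NULL, 0);	// sizing pass
 *	struct kinfo_file *kfp = malloc(len);
 *	sysctl(mib, 2, kfp, &len, NULL, 0);	// fill pass
 *	// len / sizeof(*kfp) records are now valid
 *
 * The deliberate overestimate above is what keeps the second call from
 * failing when files are opened between the two passes.
 */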
2608 static int
2609 sysctl_kern_file_callback(struct proc *p, void *data)
2610 {
2611 struct sysctl_kern_file_info *info = data;
2612 struct kinfo_file kf;
2613 struct filedesc *fdp;
2614 struct file *fp;
2615 uid_t uid;
2616 int n;
2618 if (p->p_stat == SIDL || p->p_stat == SZOMB)
2619 return(0);
2620 if (!PRISON_CHECK(info->req->td->td_proc->p_ucred, p->p_ucred))
2621 return(0);
2623 /*
2624 * Softref the fdp to prevent it from being destroyed
2625 */
2626 spin_lock_wr(&p->p_spin);
2627 if ((fdp = p->p_fd) == NULL) {
2628 spin_unlock_wr(&p->p_spin);
2629 return(0);
2630 }
2631 atomic_add_int(&fdp->fd_softrefs, 1);
2632 spin_unlock_wr(&p->p_spin);
2634 /*
2635 * The fdp's own spinlock prevents the contents from being
2636 * modified.
2637 */
2638 spin_lock_rd(&fdp->fd_spin);
2639 for (n = 0; n < fdp->fd_nfiles; ++n) {
2640 if ((fp = fdp->fd_files[n].fp) == NULL)
2641 continue;
2642 if (info->req->oldptr == NULL) {
2643 ++info->count;
2644 } else {
2645 uid = p->p_ucred ? p->p_ucred->cr_uid : -1;
2646 kcore_make_file(&kf, fp, p->p_pid, uid, n);
2647 spin_unlock_rd(&fdp->fd_spin);
2648 info->error = SYSCTL_OUT(info->req, &kf, sizeof(kf));
2649 spin_lock_rd(&fdp->fd_spin);
2650 if (info->error)
2651 break;
2652 }
2653 }
2654 spin_unlock_rd(&fdp->fd_spin);
2655 atomic_subtract_int(&fdp->fd_softrefs, 1);
2656 if (info->error)
2657 return(-1);
2658 return(0);
2659 }
2661 SYSCTL_PROC(_kern, KERN_FILE, file, CTLTYPE_OPAQUE|CTLFLAG_RD,
2662 0, 0, sysctl_kern_file, "S,file", "Entire file table");
2664 SYSCTL_INT(_kern, KERN_MAXFILESPERPROC, maxfilesperproc, CTLFLAG_RW,
2665 &maxfilesperproc, 0, "Maximum files allowed open per process");
2667 SYSCTL_INT(_kern, KERN_MAXFILES, maxfiles, CTLFLAG_RW,
2668 &maxfiles, 0, "Maximum number of files");
2670 SYSCTL_INT(_kern, OID_AUTO, maxfilesrootres, CTLFLAG_RW,
2671 &maxfilesrootres, 0, "Descriptors reserved for root use");
2673 SYSCTL_INT(_kern, OID_AUTO, openfiles, CTLFLAG_RD,
2674 &nfiles, 0, "System-wide number of open files");
2676 static void
2677 fildesc_drvinit(void *unused)
2678 {
2679 int fd;
2681 for (fd = 0; fd < NUMFDESC; fd++) {
2682 make_dev(&fildesc_ops, fd,
2683 UID_BIN, GID_BIN, 0666, "fd/%d", fd);
2684 }
2686 kprintf("fildesc_drvinit() building stdin, stdout, stderr: \n");
2688 make_dev(&fildesc_ops, 0, UID_ROOT, GID_WHEEL, 0666, "stdin");
2689 make_dev(&fildesc_ops, 1, UID_ROOT, GID_WHEEL, 0666, "stdout");
2690 make_dev(&fildesc_ops, 2, UID_ROOT, GID_WHEEL, 0666, "stderr");
2691 }
2693 /*
2694 * MPSAFE
2695 */
2696 struct fileops badfileops = {
2697 .fo_read = badfo_readwrite,
2698 .fo_write = badfo_readwrite,
2699 .fo_ioctl = badfo_ioctl,
2700 .fo_poll = badfo_poll,
2701 .fo_kqfilter = badfo_kqfilter,
2702 .fo_stat = badfo_stat,
2703 .fo_close = badfo_close,
2704 .fo_shutdown = badfo_shutdown
2705 };
2707 /*
2708 * MPSAFE
2709 */
2710 static int
2711 badfo_readwrite(
2712 struct file *fp,
2713 struct uio *uio,
2714 struct ucred *cred,
2715 int flags
2716 ) {
2717 return (EBADF);
2718 }
2720 /*
2721 * MPSAFE
2722 */
2723 static int
2724 badfo_ioctl(struct file *fp, u_long com, caddr_t data,
2725 struct ucred *cred, struct sysmsg *msgv)
2726 {
2727 return (EBADF);
2728 }
2730 /*
2731 * MPSAFE
2732 */
2733 static int
2734 badfo_poll(struct file *fp, int events, struct ucred *cred)
2735 {
2736 return (0);
2737 }
2739 /*
2740 * MPSAFE
2741 */
2742 static int
2743 badfo_kqfilter(struct file *fp, struct knote *kn)
2744 {
2745 return (0);
2746 }
2748 static int
2749 badfo_stat(struct file *fp, struct stat *sb, struct ucred *cred)
2750 {
2751 return (EBADF);
2752 }
2754 /*
2755 * MPSAFE
2756 */
2757 static int
2758 badfo_close(struct file *fp)
2759 {
2760 return (EBADF);
2761 }
2763 /*
2764 * MPSAFE
2765 */
2766 static int
2767 badfo_shutdown(struct file *fp, int how)
2768 {
2769 return (EBADF);
2770 }
2772 /*
2773 * MPSAFE
2774 */
2775 int
2776 nofo_shutdown(struct file *fp, int how)
2777 {
2778 return (EOPNOTSUPP);
2779 }
2781 SYSINIT(fildescdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,
2782 fildesc_drvinit,NULL)