1 /* $NetBSD: kern_descrip.c,v 1.189 2009/03/29 04:40:01 rmind Exp $ */
3 /*-
4 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Andrew Doran.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
33 * Copyright (c) 1982, 1986, 1989, 1991, 1993
34 * The Regents of the University of California. All rights reserved.
35 * (c) UNIX System Laboratories, Inc.
36 * All or some portions of this file are derived from material licensed
37 * to the University of California by American Telephone and Telegraph
38 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
39 * the permission of UNIX System Laboratories, Inc.
41 * Redistribution and use in source and binary forms, with or without
42 * modification, are permitted provided that the following conditions
43 * are met:
44 * 1. Redistributions of source code must retain the above copyright
45 * notice, this list of conditions and the following disclaimer.
46 * 2. Redistributions in binary form must reproduce the above copyright
47 * notice, this list of conditions and the following disclaimer in the
48 * documentation and/or other materials provided with the distribution.
49 * 3. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
65 * @(#)kern_descrip.c 8.8 (Berkeley) 2/14/95
69 * File descriptor management.
72 #include <sys/cdefs.h>
73 __KERNEL_RCSID(0, "$NetBSD: kern_descrip.c,v 1.189 2009/03/29 04:40:01 rmind Exp $");
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/filedesc.h>
78 #include <sys/kernel.h>
79 #include <sys/proc.h>
80 #include <sys/file.h>
81 #include <sys/socket.h>
82 #include <sys/socketvar.h>
83 #include <sys/stat.h>
84 #include <sys/ioctl.h>
85 #include <sys/fcntl.h>
86 #include <sys/pool.h>
87 #include <sys/unistd.h>
88 #include <sys/resourcevar.h>
89 #include <sys/conf.h>
90 #include <sys/event.h>
91 #include <sys/kauth.h>
92 #include <sys/atomic.h>
93 #include <sys/syscallargs.h>
94 #include <sys/cpu.h>
95 #include <sys/kmem.h>
96 #include <sys/vnode.h>
98 static int file_ctor(void *, void *, int);
99 static void file_dtor(void *, void *);
100 static int fdfile_ctor(void *, void *, int);
101 static void fdfile_dtor(void *, void *);
102 static int filedesc_ctor(void *, void *, int);
103 static void filedesc_dtor(void *, void *);
104 static int filedescopen(dev_t, int, int, lwp_t *);
106 kmutex_t filelist_lock; /* lock on filehead */
107 struct filelist filehead; /* head of list of open files */
108 u_int nfiles; /* actual number of open files */
110 static pool_cache_t filedesc_cache;
111 static pool_cache_t file_cache;
112 static pool_cache_t fdfile_cache;
114 const struct cdevsw filedesc_cdevsw = {
115 filedescopen, noclose, noread, nowrite, noioctl,
116 nostop, notty, nopoll, nommap, nokqfilter, D_OTHER | D_MPSAFE,
119 /* For ease of reading. */
120 __strong_alias(fd_putvnode,fd_putfile)
121 __strong_alias(fd_putsock,fd_putfile)
124 * Initialize the descriptor system.
126 void
127 fd_sys_init(void)
130 mutex_init(&filelist_lock, MUTEX_DEFAULT, IPL_NONE);
132 file_cache = pool_cache_init(sizeof(file_t), coherency_unit, 0,
133 0, "file", NULL, IPL_NONE, file_ctor, file_dtor, NULL);
134 KASSERT(file_cache != NULL);
136 fdfile_cache = pool_cache_init(sizeof(fdfile_t), coherency_unit, 0,
137 PR_LARGECACHE, "fdfile", NULL, IPL_NONE, fdfile_ctor, fdfile_dtor,
138 NULL);
139 KASSERT(fdfile_cache != NULL);
141 filedesc_cache = pool_cache_init(sizeof(filedesc_t), coherency_unit,
142 0, 0, "filedesc", NULL, IPL_NONE, filedesc_ctor, filedesc_dtor,
143 NULL);
144 KASSERT(filedesc_cache != NULL);
147 static int
148 fd_next_zero(filedesc_t *fdp, uint32_t *bitmap, int want, u_int bits)
150 int i, off, maxoff;
151 uint32_t sub;
153 KASSERT(mutex_owned(&fdp->fd_lock));
155 if (want > bits)
156 return -1;
158 off = want >> NDENTRYSHIFT;
159 i = want & NDENTRYMASK;
160 if (i) {
161 sub = bitmap[off] | ((u_int)~0 >> (NDENTRIES - i));
162 if (sub != ~0)
163 goto found;
164 off++;
167 maxoff = NDLOSLOTS(bits);
168 while (off < maxoff) {
169 if ((sub = bitmap[off]) != ~0)
170 goto found;
171 off++;
174 return (-1);
176 found:
177 return (off << NDENTRYSHIFT) + ffs(~sub) - 1;
180 static int
181 fd_last_set(filedesc_t *fd, int last)
183 int off, i;
184 fdfile_t **ofiles = fd->fd_ofiles;
185 uint32_t *bitmap = fd->fd_lomap;
187 KASSERT(mutex_owned(&fd->fd_lock));
189 off = (last - 1) >> NDENTRYSHIFT;
191 while (off >= 0 && !bitmap[off])
192 off--;
194 if (off < 0)
195 return (-1);
197 i = ((off + 1) << NDENTRYSHIFT) - 1;
198 if (i >= last)
199 i = last - 1;
201 /* XXX should use bitmap */
202 /* XXXAD does not work for fd_copy() */
203 while (i > 0 && (ofiles[i] == NULL || !ofiles[i]->ff_allocated))
204 i--;
206 return (i);
209 void
210 fd_used(filedesc_t *fdp, unsigned fd)
212 u_int off = fd >> NDENTRYSHIFT;
213 fdfile_t *ff;
215 ff = fdp->fd_ofiles[fd];
217 KASSERT(mutex_owned(&fdp->fd_lock));
218 KASSERT((fdp->fd_lomap[off] & (1 << (fd & NDENTRYMASK))) == 0);
219 KASSERT(ff != NULL);
220 KASSERT(ff->ff_file == NULL);
221 KASSERT(!ff->ff_allocated);
223 ff->ff_allocated = 1;
224 fdp->fd_lomap[off] |= 1 << (fd & NDENTRYMASK);
225 if (fdp->fd_lomap[off] == ~0) {
226 KASSERT((fdp->fd_himap[off >> NDENTRYSHIFT] &
227 (1 << (off & NDENTRYMASK))) == 0);
228 fdp->fd_himap[off >> NDENTRYSHIFT] |= 1 << (off & NDENTRYMASK);
231 if ((int)fd > fdp->fd_lastfile) {
232 fdp->fd_lastfile = fd;
235 if (fd >= NDFDFILE) {
236 fdp->fd_nused++;
237 } else {
238 KASSERT(ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
242 void
243 fd_unused(filedesc_t *fdp, unsigned fd)
245 u_int off = fd >> NDENTRYSHIFT;
246 fdfile_t *ff;
248 ff = fdp->fd_ofiles[fd];
251 * Don't assert the lock is held here, as we may be copying
252 * the table during exec() and it is not needed there.
253 * procfs and sysctl are locked out by proc::p_reflock.
255 * KASSERT(mutex_owned(&fdp->fd_lock));
257 KASSERT(ff != NULL);
258 KASSERT(ff->ff_file == NULL);
259 KASSERT(ff->ff_allocated);
261 if (fd < fdp->fd_freefile) {
262 fdp->fd_freefile = fd;
265 if (fdp->fd_lomap[off] == ~0) {
266 KASSERT((fdp->fd_himap[off >> NDENTRYSHIFT] &
267 (1 << (off & NDENTRYMASK))) != 0);
268 fdp->fd_himap[off >> NDENTRYSHIFT] &=
269 ~(1 << (off & NDENTRYMASK));
271 KASSERT((fdp->fd_lomap[off] & (1 << (fd & NDENTRYMASK))) != 0);
272 fdp->fd_lomap[off] &= ~(1 << (fd & NDENTRYMASK));
273 ff->ff_allocated = 0;
275 KASSERT(fd <= fdp->fd_lastfile);
276 if (fd == fdp->fd_lastfile) {
277 fdp->fd_lastfile = fd_last_set(fdp, fd);
280 if (fd >= NDFDFILE) {
281 KASSERT(fdp->fd_nused > 0);
282 fdp->fd_nused--;
283 } else {
284 KASSERT(ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
289 * Custom version of fd_unused() for fd_copy(), where the descriptor
290 * table is not yet fully initialized.
292 static inline void
293 fd_zap(filedesc_t *fdp, unsigned fd)
295 u_int off = fd >> NDENTRYSHIFT;
297 if (fd < fdp->fd_freefile) {
298 fdp->fd_freefile = fd;
301 if (fdp->fd_lomap[off] == ~0) {
302 KASSERT((fdp->fd_himap[off >> NDENTRYSHIFT] &
303 (1 << (off & NDENTRYMASK))) != 0);
304 fdp->fd_himap[off >> NDENTRYSHIFT] &=
305 ~(1 << (off & NDENTRYMASK));
307 KASSERT((fdp->fd_lomap[off] & (1 << (fd & NDENTRYMASK))) != 0);
308 fdp->fd_lomap[off] &= ~(1 << (fd & NDENTRYMASK));
311 bool
312 fd_isused(filedesc_t *fdp, unsigned fd)
314 u_int off = fd >> NDENTRYSHIFT;
316 KASSERT(fd < fdp->fd_nfiles);
318 return (fdp->fd_lomap[off] & (1 << (fd & NDENTRYMASK))) != 0;
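/*
 * Illustrative note (not part of the original file): how a descriptor
 * number maps onto the two-level bitmap used above, assuming the
 * 32-bit map words implied by the uint32_t arrays.
 *
 *	lo_slot = fd >> NDENTRYSHIFT;		which fd_lomap word
 *	lo_bit  = fd & NDENTRYMASK;		bit within that word
 *	hi_slot = lo_slot >> NDENTRYSHIFT;	which fd_himap word
 *	hi_bit  = lo_slot & NDENTRYMASK;	set once the lomap word is full
 */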
322 * Look up the file structure corresponding to a file descriptor
323 * and return the file, holding a reference on the descriptor.
325 inline file_t *
326 fd_getfile(unsigned fd)
328 filedesc_t *fdp;
329 fdfile_t *ff;
330 file_t *fp;
332 fdp = curlwp->l_fd;
335 * Look up the fdfile structure representing this descriptor.
336 * Ensure that we see fd_nfiles before fd_ofiles since we
337 * are doing this unlocked. See fd_tryexpand().
339 if (__predict_false(fd >= fdp->fd_nfiles)) {
340 return NULL;
342 membar_consumer();
343 ff = fdp->fd_ofiles[fd];
344 KASSERT(fd >= NDFDFILE || ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
345 if (__predict_false(ff == NULL)) {
346 return NULL;
350 * Now get a reference to the descriptor. Issue a memory
351 * barrier to ensure that we acquire the file pointer _after_
 352 	 * adding a reference.  Without a memory barrier, we could fetch
353 * a stale pointer.
355 atomic_inc_uint(&ff->ff_refcnt);
356 #ifndef __HAVE_ATOMIC_AS_MEMBAR
357 membar_enter();
358 #endif
361 * If the file is not open or is being closed then put the
362 * reference back.
364 fp = ff->ff_file;
365 if (__predict_true(fp != NULL)) {
366 return fp;
368 fd_putfile(fd);
369 return NULL;
373 * Release a reference to a file descriptor acquired with fd_getfile().
375 void
376 fd_putfile(unsigned fd)
378 filedesc_t *fdp;
379 fdfile_t *ff;
380 u_int u, v;
382 fdp = curlwp->l_fd;
383 ff = fdp->fd_ofiles[fd];
385 KASSERT(fd < fdp->fd_nfiles);
386 KASSERT(ff != NULL);
387 KASSERT((ff->ff_refcnt & FR_MASK) > 0);
388 KASSERT(fd >= NDFDFILE || ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
391 * Ensure that any use of the file is complete and globally
 392 	 * visible before dropping the final reference.  Without a membar,
393 * the current CPU could still access memory associated with
394 * the file after it has been freed or recycled by another
395 * CPU.
397 #ifndef __HAVE_ATOMIC_AS_MEMBAR
398 membar_exit();
399 #endif
402 * Be optimistic and start out with the assumption that no other
403 * threads are trying to close the descriptor. If the CAS fails,
404 * we lost a race and/or it's being closed.
406 for (u = ff->ff_refcnt & FR_MASK;; u = v) {
407 v = atomic_cas_uint(&ff->ff_refcnt, u, u - 1);
408 if (__predict_true(u == v)) {
409 return;
411 if (__predict_false((v & FR_CLOSING) != 0)) {
412 break;
416 /* Another thread is waiting to close the file: join it. */
417 (void)fd_close(fd);
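/*
 * Illustrative sketch (not part of the original file) of the usual
 * fd_getfile()/fd_putfile() consumer pattern; example_fd_flags() is a
 * hypothetical helper, not an existing kernel function.
 *
 *	static int
 *	example_fd_flags(unsigned fd, int *flagp)
 *	{
 *		file_t *fp;
 *
 *		if ((fp = fd_getfile(fd)) == NULL)
 *			return EBADF;
 *		*flagp = fp->f_flag;
 *		fd_putfile(fd);
 *		return 0;
 *	}
 */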
 421 	 * Convenience wrapper around fd_getfile() that returns a reference
422 * to a vnode.
425 fd_getvnode(unsigned fd, file_t **fpp)
427 vnode_t *vp;
428 file_t *fp;
430 fp = fd_getfile(fd);
431 if (__predict_false(fp == NULL)) {
432 return EBADF;
434 if (__predict_false(fp->f_type != DTYPE_VNODE)) {
435 fd_putfile(fd);
436 return EINVAL;
438 vp = fp->f_data;
439 if (__predict_false(vp->v_type == VBAD)) {
440 /* XXX Is this case really necessary? */
441 fd_putfile(fd);
442 return EBADF;
444 *fpp = fp;
445 return 0;
 449 	 * Convenience wrapper around fd_getfile() that returns a reference
450 * to a socket.
453 fd_getsock(unsigned fd, struct socket **sop)
455 file_t *fp;
457 fp = fd_getfile(fd);
458 if (__predict_false(fp == NULL)) {
459 return EBADF;
461 if (__predict_false(fp->f_type != DTYPE_SOCKET)) {
462 fd_putfile(fd);
463 return ENOTSOCK;
465 *sop = fp->f_data;
466 return 0;
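/*
 * Illustrative sketch (not part of the original file): using
 * fd_getsock() together with the fd_putsock() alias defined above;
 * the helper name and the field read are examples only.
 *
 *	static int
 *	example_sock_type(unsigned fd, int *typep)
 *	{
 *		struct socket *so;
 *		int error;
 *
 *		if ((error = fd_getsock(fd, &so)) != 0)
 *			return error;		(EBADF or ENOTSOCK)
 *		*typep = so->so_type;
 *		fd_putsock(fd);
 *		return 0;
 *	}
 */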
470 * Look up the file structure corresponding to a file descriptor
471 * and return it with a reference held on the file, not the
472 * descriptor.
474 * This is heavyweight and only used when accessing descriptors
475 * from a foreign process. The caller must ensure that `p' does
476 * not exit or fork across this call.
478 * To release the file (not descriptor) reference, use closef().
480 file_t *
481 fd_getfile2(proc_t *p, unsigned fd)
483 filedesc_t *fdp;
484 fdfile_t *ff;
485 file_t *fp;
487 fdp = p->p_fd;
488 mutex_enter(&fdp->fd_lock);
 489 	if (fd >= fdp->fd_nfiles) {
490 mutex_exit(&fdp->fd_lock);
491 return NULL;
493 if ((ff = fdp->fd_ofiles[fd]) == NULL) {
494 mutex_exit(&fdp->fd_lock);
495 return NULL;
497 mutex_enter(&ff->ff_lock);
498 if ((fp = ff->ff_file) == NULL) {
499 mutex_exit(&ff->ff_lock);
500 mutex_exit(&fdp->fd_lock);
501 return NULL;
503 mutex_enter(&fp->f_lock);
504 fp->f_count++;
505 mutex_exit(&fp->f_lock);
506 mutex_exit(&ff->ff_lock);
507 mutex_exit(&fdp->fd_lock);
509 return fp;
513 * Internal form of close. Must be called with a reference to the
514 * descriptor, and will drop the reference. When all descriptor
515 * references are dropped, releases the descriptor slot and a single
516 * reference to the file structure.
519 fd_close(unsigned fd)
521 struct flock lf;
522 filedesc_t *fdp;
523 fdfile_t *ff;
524 file_t *fp;
525 proc_t *p;
526 lwp_t *l;
528 l = curlwp;
529 p = l->l_proc;
530 fdp = l->l_fd;
531 ff = fdp->fd_ofiles[fd];
533 KASSERT(fd >= NDFDFILE || ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
535 mutex_enter(&ff->ff_lock);
536 KASSERT((ff->ff_refcnt & FR_MASK) > 0);
537 if (ff->ff_file == NULL) {
539 * Another user of the file is already closing, and is
540 * waiting for other users of the file to drain. Release
541 * our reference, and wake up the closer.
543 atomic_dec_uint(&ff->ff_refcnt);
544 cv_broadcast(&ff->ff_closing);
545 mutex_exit(&ff->ff_lock);
548 * An application error, so pretend that the descriptor
549 * was already closed. We can't safely wait for it to
550 * be closed without potentially deadlocking.
552 return (EBADF);
554 KASSERT((ff->ff_refcnt & FR_CLOSING) == 0);
557 * There may be multiple users of this file within the process.
558 * Notify existing and new users that the file is closing. This
559 * will prevent them from adding additional uses to this file
560 * while we are closing it.
562 fp = ff->ff_file;
563 ff->ff_file = NULL;
564 ff->ff_exclose = false;
567 * We expect the caller to hold a descriptor reference - drop it.
568 * The reference count may increase beyond zero at this point due
569 * to an erroneous descriptor reference by an application, but
570 * fd_getfile() will notice that the file is being closed and drop
571 * the reference again.
573 #ifndef __HAVE_ATOMIC_AS_MEMBAR
574 membar_producer();
575 #endif
576 if (__predict_false(atomic_dec_uint_nv(&ff->ff_refcnt) != 0)) {
578 * Wait for other references to drain. This is typically
579 * an application error - the descriptor is being closed
580 * while still in use.
583 atomic_or_uint(&ff->ff_refcnt, FR_CLOSING);
586 * Remove any knotes attached to the file. A knote
587 * attached to the descriptor can hold references on it.
589 mutex_exit(&ff->ff_lock);
590 if (!SLIST_EMPTY(&ff->ff_knlist)) {
591 knote_fdclose(fd);
594 /* Try to drain out descriptor references. */
595 (*fp->f_ops->fo_drain)(fp);
596 mutex_enter(&ff->ff_lock);
599 * We need to see the count drop to zero at least once,
600 * in order to ensure that all pre-existing references
601 * have been drained. New references past this point are
602 * of no interest.
604 while ((ff->ff_refcnt & FR_MASK) != 0) {
605 cv_wait(&ff->ff_closing, &ff->ff_lock);
607 atomic_and_uint(&ff->ff_refcnt, ~FR_CLOSING);
608 } else {
609 /* If no references, there must be no knotes. */
610 KASSERT(SLIST_EMPTY(&ff->ff_knlist));
612 mutex_exit(&ff->ff_lock);
615 * POSIX record locking dictates that any close releases ALL
616 * locks owned by this process. This is handled by setting
617 * a flag in the unlock to free ONLY locks obeying POSIX
618 * semantics, and not to free BSD-style file locks.
619 * If the descriptor was in a message, POSIX-style locks
620 * aren't passed with the descriptor.
622 if ((p->p_flag & PK_ADVLOCK) != 0 && fp->f_type == DTYPE_VNODE) {
623 lf.l_whence = SEEK_SET;
624 lf.l_start = 0;
625 lf.l_len = 0;
626 lf.l_type = F_UNLCK;
627 (void)VOP_ADVLOCK(fp->f_data, p, F_UNLCK, &lf, F_POSIX);
631 /* Free descriptor slot. */
632 mutex_enter(&fdp->fd_lock);
633 fd_unused(fdp, fd);
634 mutex_exit(&fdp->fd_lock);
636 /* Now drop reference to the file itself. */
637 return closef(fp);
641 * Duplicate a file descriptor.
644 fd_dup(file_t *fp, int minfd, int *newp, bool exclose)
646 proc_t *p;
647 int error;
649 p = curproc;
651 while ((error = fd_alloc(p, minfd, newp)) != 0) {
652 if (error != ENOSPC) {
653 return error;
655 fd_tryexpand(p);
658 curlwp->l_fd->fd_ofiles[*newp]->ff_exclose = exclose;
659 fd_affix(p, fp, *newp);
660 return 0;
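/*
 * Illustrative sketch (not part of the original file): a dup(2)-style
 * caller of fd_dup(), assuming 'fp' came from fd_getfile(old) and that
 * 'retval' is the caller's result slot (both names are hypothetical).
 *
 *	error = fd_dup(fp, 0, &new, false);	lowest free descriptor
 *	fd_putfile(old);			drop the lookup reference
 *	if (error == 0)
 *		*retval = new;
 */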
664 * dup2 operation.
667 fd_dup2(file_t *fp, unsigned new)
669 filedesc_t *fdp;
670 fdfile_t *ff;
672 fdp = curlwp->l_fd;
675 * Ensure there are enough slots in the descriptor table,
676 * and allocate an fdfile_t up front in case we need it.
678 while (new >= fdp->fd_nfiles) {
679 fd_tryexpand(curproc);
681 ff = pool_cache_get(fdfile_cache, PR_WAITOK);
684 * If there is already a file open, close it. If the file is
685 * half open, wait for it to be constructed before closing it.
686 * XXX Potential for deadlock here?
688 mutex_enter(&fdp->fd_lock);
689 while (fd_isused(fdp, new)) {
690 mutex_exit(&fdp->fd_lock);
691 if (fd_getfile(new) != NULL) {
692 (void)fd_close(new);
693 } else {
694 /* XXX Crummy, but unlikely to happen. */
695 kpause("dup2", false, 1, NULL);
697 mutex_enter(&fdp->fd_lock);
699 if (fdp->fd_ofiles[new] == NULL) {
700 KASSERT(new >= NDFDFILE);
701 fdp->fd_ofiles[new] = ff;
702 ff = NULL;
704 fd_used(fdp, new);
705 mutex_exit(&fdp->fd_lock);
707 /* Slot is now allocated. Insert copy of the file. */
708 fd_affix(curproc, fp, new);
709 if (ff != NULL) {
710 pool_cache_put(fdfile_cache, ff);
712 return 0;
716 * Drop reference to a file structure.
719 closef(file_t *fp)
721 struct flock lf;
722 int error;
725 * Drop reference. If referenced elsewhere it's still open
726 * and we have nothing more to do.
728 mutex_enter(&fp->f_lock);
729 KASSERT(fp->f_count > 0);
730 if (--fp->f_count > 0) {
731 mutex_exit(&fp->f_lock);
732 return 0;
734 KASSERT(fp->f_count == 0);
735 mutex_exit(&fp->f_lock);
737 /* We held the last reference - release locks, close and free. */
738 if ((fp->f_flag & FHASLOCK) && fp->f_type == DTYPE_VNODE) {
739 lf.l_whence = SEEK_SET;
740 lf.l_start = 0;
741 lf.l_len = 0;
742 lf.l_type = F_UNLCK;
743 (void)VOP_ADVLOCK(fp->f_data, fp, F_UNLCK, &lf, F_FLOCK);
745 if (fp->f_ops != NULL) {
746 error = (*fp->f_ops->fo_close)(fp);
747 } else {
748 error = 0;
750 ffree(fp);
752 return error;
756 * Allocate a file descriptor for the process.
759 fd_alloc(proc_t *p, int want, int *result)
761 filedesc_t *fdp;
762 int i, lim, last, error;
763 u_int off, new;
764 fdfile_t *ff;
766 KASSERT(p == curproc || p == &proc0);
768 fdp = p->p_fd;
769 ff = pool_cache_get(fdfile_cache, PR_WAITOK);
770 KASSERT(ff->ff_refcnt == 0);
771 KASSERT(ff->ff_file == NULL);
774 * Search for a free descriptor starting at the higher
775 * of want or fd_freefile.
777 mutex_enter(&fdp->fd_lock);
778 KASSERT(fdp->fd_ofiles[0] == (fdfile_t *)fdp->fd_dfdfile[0]);
779 lim = min((int)p->p_rlimit[RLIMIT_NOFILE].rlim_cur, maxfiles);
780 last = min(fdp->fd_nfiles, lim);
781 for (;;) {
782 if ((i = want) < fdp->fd_freefile)
783 i = fdp->fd_freefile;
784 off = i >> NDENTRYSHIFT;
785 new = fd_next_zero(fdp, fdp->fd_himap, off,
786 (last + NDENTRIES - 1) >> NDENTRYSHIFT);
787 if (new == -1)
788 break;
789 i = fd_next_zero(fdp, &fdp->fd_lomap[new],
790 new > off ? 0 : i & NDENTRYMASK, NDENTRIES);
791 if (i == -1) {
 793 				 * The free file descriptor in this block was
 794 				 * below 'want'; try again with a higher 'want'.
796 want = (new + 1) << NDENTRYSHIFT;
797 continue;
799 i += (new << NDENTRYSHIFT);
800 if (i >= last) {
801 break;
803 if (fdp->fd_ofiles[i] == NULL) {
804 KASSERT(i >= NDFDFILE);
805 fdp->fd_ofiles[i] = ff;
806 } else {
807 pool_cache_put(fdfile_cache, ff);
809 KASSERT(fdp->fd_ofiles[i]->ff_file == NULL);
810 fd_used(fdp, i);
811 if (want <= fdp->fd_freefile) {
812 fdp->fd_freefile = i;
814 *result = i;
815 mutex_exit(&fdp->fd_lock);
816 KASSERT(i >= NDFDFILE ||
817 fdp->fd_ofiles[i] == (fdfile_t *)fdp->fd_dfdfile[i]);
818 return 0;
821 /* No space in current array. Let the caller expand and retry. */
822 error = (fdp->fd_nfiles >= lim) ? EMFILE : ENOSPC;
823 mutex_exit(&fdp->fd_lock);
824 pool_cache_put(fdfile_cache, ff);
825 return error;
829 * Allocate memory for the open files array.
831 static fdfile_t **
832 fd_ofile_alloc(int n)
834 uintptr_t *ptr, sz;
836 KASSERT(n > NDFILE);
838 sz = (n + 2) * sizeof(uintptr_t);
839 ptr = kmem_alloc((size_t)sz, KM_SLEEP);
840 ptr[1] = sz;
842 return (fdfile_t **)(ptr + 2);
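/*
 * Illustrative note (not part of the original file): layout of the
 * array returned above.  Two hidden uintptr_t slots precede the
 * fdfile_t pointers: slot [-2] links discarded arrays onto fd_discard
 * (see fd_tryexpand()) and slot [-1] records the allocation size used
 * by kmem_free() in fd_free().
 *
 *	ptr[0]   ptr[1]   ptr[2] ... ptr[n+1]
 *	 link     size    of[0]  ...  of[n-1]	returned pointer is &ptr[2]
 */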
846 * Free an open files array.
848 static void
849 fd_ofile_free(int n, fdfile_t **of)
851 uintptr_t *ptr, sz;
853 KASSERT(n > NDFILE);
855 sz = (n + 2) * sizeof(uintptr_t);
856 ptr = (uintptr_t *)of - 2;
857 KASSERT(ptr[1] == sz);
858 kmem_free(ptr, sz);
862 * Allocate descriptor bitmap.
864 static void
865 fd_map_alloc(int n, uint32_t **lo, uint32_t **hi)
867 uint8_t *ptr;
868 size_t szlo, szhi;
870 KASSERT(n > NDENTRIES);
872 szlo = NDLOSLOTS(n) * sizeof(uint32_t);
873 szhi = NDHISLOTS(n) * sizeof(uint32_t);
874 ptr = kmem_alloc(szlo + szhi, KM_SLEEP);
875 *lo = (uint32_t *)ptr;
876 *hi = (uint32_t *)(ptr + szlo);
880 * Free descriptor bitmap.
882 static void
883 fd_map_free(int n, uint32_t *lo, uint32_t *hi)
885 size_t szlo, szhi;
887 KASSERT(n > NDENTRIES);
889 szlo = NDLOSLOTS(n) * sizeof(uint32_t);
890 szhi = NDHISLOTS(n) * sizeof(uint32_t);
891 KASSERT(hi == (uint32_t *)((uint8_t *)lo + szlo));
892 kmem_free(lo, szlo + szhi);
896 * Expand a process' descriptor table.
898 void
899 fd_tryexpand(proc_t *p)
901 filedesc_t *fdp;
902 int i, numfiles, oldnfiles;
903 fdfile_t **newofile;
904 uint32_t *newhimap, *newlomap;
906 KASSERT(p == curproc || p == &proc0);
908 fdp = p->p_fd;
909 newhimap = NULL;
910 newlomap = NULL;
911 oldnfiles = fdp->fd_nfiles;
913 if (oldnfiles < NDEXTENT)
914 numfiles = NDEXTENT;
915 else
916 numfiles = 2 * oldnfiles;
918 newofile = fd_ofile_alloc(numfiles);
919 if (NDHISLOTS(numfiles) > NDHISLOTS(oldnfiles)) {
920 fd_map_alloc(numfiles, &newlomap, &newhimap);
923 mutex_enter(&fdp->fd_lock);
924 KASSERT(fdp->fd_ofiles[0] == (fdfile_t *)fdp->fd_dfdfile[0]);
925 if (fdp->fd_nfiles != oldnfiles) {
926 /* fdp changed; caller must retry */
927 mutex_exit(&fdp->fd_lock);
928 fd_ofile_free(numfiles, newofile);
929 if (NDHISLOTS(numfiles) > NDHISLOTS(oldnfiles)) {
930 fd_map_free(numfiles, newlomap, newhimap);
932 return;
935 /* Copy the existing ofile array and zero the new portion. */
936 i = sizeof(fdfile_t *) * fdp->fd_nfiles;
937 memcpy(newofile, fdp->fd_ofiles, i);
938 memset((uint8_t *)newofile + i, 0, numfiles * sizeof(fdfile_t *) - i);
941 * Link old ofiles array into list to be discarded. We defer
 942 	 * freeing until process exit if the descriptor table is visible
943 * to other threads.
945 if (oldnfiles > NDFILE) {
946 if ((fdp->fd_refcnt | p->p_nlwps) > 1) {
947 fdp->fd_ofiles[-2] = (void *)fdp->fd_discard;
948 fdp->fd_discard = fdp->fd_ofiles - 2;
949 } else {
950 fd_ofile_free(oldnfiles, fdp->fd_ofiles);
954 if (NDHISLOTS(numfiles) > NDHISLOTS(oldnfiles)) {
955 i = NDHISLOTS(oldnfiles) * sizeof(uint32_t);
956 memcpy(newhimap, fdp->fd_himap, i);
957 memset((uint8_t *)newhimap + i, 0,
958 NDHISLOTS(numfiles) * sizeof(uint32_t) - i);
960 i = NDLOSLOTS(oldnfiles) * sizeof(uint32_t);
961 memcpy(newlomap, fdp->fd_lomap, i);
962 memset((uint8_t *)newlomap + i, 0,
963 NDLOSLOTS(numfiles) * sizeof(uint32_t) - i);
965 if (NDHISLOTS(oldnfiles) > NDHISLOTS(NDFILE)) {
966 fd_map_free(oldnfiles, fdp->fd_lomap, fdp->fd_himap);
968 fdp->fd_himap = newhimap;
969 fdp->fd_lomap = newlomap;
973 * All other modifications must become globally visible before
974 * the change to fd_nfiles. See fd_getfile().
976 fdp->fd_ofiles = newofile;
977 membar_producer();
978 fdp->fd_nfiles = numfiles;
979 mutex_exit(&fdp->fd_lock);
981 KASSERT(fdp->fd_ofiles[0] == (fdfile_t *)fdp->fd_dfdfile[0]);
985 * Create a new open file structure and allocate a file descriptor
986 * for the current process.
989 fd_allocfile(file_t **resultfp, int *resultfd)
991 file_t *fp;
992 proc_t *p;
993 int error;
995 p = curproc;
997 while ((error = fd_alloc(p, 0, resultfd)) != 0) {
998 if (error != ENOSPC) {
999 return error;
1001 fd_tryexpand(p);
1004 fp = pool_cache_get(file_cache, PR_WAITOK);
1005 KASSERT(fp->f_count == 0);
1006 KASSERT(fp->f_msgcount == 0);
1007 KASSERT(fp->f_unpcount == 0);
1008 fp->f_cred = kauth_cred_get();
1009 kauth_cred_hold(fp->f_cred);
1011 if (__predict_false(atomic_inc_uint_nv(&nfiles) >= maxfiles)) {
1012 fd_abort(p, fp, *resultfd);
1013 tablefull("file", "increase kern.maxfiles or MAXFILES");
1014 return ENFILE;
1018 * Don't allow recycled files to be scanned.
1020 if ((fp->f_flag & FSCAN) != 0) {
1021 mutex_enter(&fp->f_lock);
1022 atomic_and_uint(&fp->f_flag, ~FSCAN);
1023 mutex_exit(&fp->f_lock);
1026 fp->f_advice = 0;
1027 fp->f_msgcount = 0;
1028 fp->f_offset = 0;
1029 *resultfp = fp;
1031 return 0;
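/*
 * Illustrative sketch (not part of the original file): the typical
 * open(2)-style allocation pattern built from fd_allocfile(),
 * fd_abort() and fd_affix(); example_setup() and 'retval' are
 * hypothetical, standing in for code that fills in f_type, f_ops
 * and f_data and returns the result.
 *
 *	if ((error = fd_allocfile(&fp, &fd)) != 0)
 *		return error;
 *	if ((error = example_setup(fp)) != 0) {
 *		fd_abort(curproc, fp, fd);	releases slot and file
 *		return error;
 *	}
 *	fd_affix(curproc, fp, fd);		descriptor becomes visible
 *	*retval = fd;
 */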
1035 * Successful creation of a new descriptor: make visible to the process.
1037 void
1038 fd_affix(proc_t *p, file_t *fp, unsigned fd)
1040 fdfile_t *ff;
1041 filedesc_t *fdp;
1043 KASSERT(p == curproc || p == &proc0);
1045 /* Add a reference to the file structure. */
1046 mutex_enter(&fp->f_lock);
1047 fp->f_count++;
1048 mutex_exit(&fp->f_lock);
1051 * Insert the new file into the descriptor slot.
1053 * The memory barriers provided by lock activity in this routine
1054 * ensure that any updates to the file structure become globally
1055 * visible before the file becomes visible to other LWPs in the
1056 * current process.
1058 fdp = p->p_fd;
1059 ff = fdp->fd_ofiles[fd];
1061 KASSERT(ff != NULL);
1062 KASSERT(ff->ff_file == NULL);
1063 KASSERT(ff->ff_allocated);
1064 KASSERT(fd_isused(fdp, fd));
1065 KASSERT(fd >= NDFDFILE ||
1066 fdp->fd_ofiles[fd] == (fdfile_t *)fdp->fd_dfdfile[fd]);
1068 /* No need to lock in order to make file initially visible. */
1069 ff->ff_file = fp;
1073 * Abort creation of a new descriptor: free descriptor slot and file.
1075 void
1076 fd_abort(proc_t *p, file_t *fp, unsigned fd)
1078 filedesc_t *fdp;
1079 fdfile_t *ff;
1081 KASSERT(p == curproc || p == &proc0);
1083 fdp = p->p_fd;
1084 ff = fdp->fd_ofiles[fd];
1086 KASSERT(fd >= NDFDFILE ||
1087 fdp->fd_ofiles[fd] == (fdfile_t *)fdp->fd_dfdfile[fd]);
1089 mutex_enter(&fdp->fd_lock);
1090 KASSERT(fd_isused(fdp, fd));
1091 fd_unused(fdp, fd);
1092 mutex_exit(&fdp->fd_lock);
1094 if (fp != NULL) {
1095 ffree(fp);
1100 * Free a file descriptor.
1102 void
1103 ffree(file_t *fp)
1106 KASSERT(fp->f_count == 0);
1108 atomic_dec_uint(&nfiles);
1109 kauth_cred_free(fp->f_cred);
1110 pool_cache_put(file_cache, fp);
1113 static int
1114 file_ctor(void *arg, void *obj, int flags)
1116 file_t *fp = obj;
1118 memset(fp, 0, sizeof(*fp));
1119 mutex_init(&fp->f_lock, MUTEX_DEFAULT, IPL_NONE);
1121 mutex_enter(&filelist_lock);
1122 LIST_INSERT_HEAD(&filehead, fp, f_list);
1123 mutex_exit(&filelist_lock);
1125 return 0;
1128 static void
1129 file_dtor(void *arg, void *obj)
1131 file_t *fp = obj;
1133 mutex_enter(&filelist_lock);
1134 LIST_REMOVE(fp, f_list);
1135 mutex_exit(&filelist_lock);
1137 mutex_destroy(&fp->f_lock);
1140 static int
1141 fdfile_ctor(void *arg, void *obj, int flags)
1143 fdfile_t *ff = obj;
1145 memset(ff, 0, sizeof(*ff));
1146 mutex_init(&ff->ff_lock, MUTEX_DEFAULT, IPL_NONE);
1147 cv_init(&ff->ff_closing, "fdclose");
1149 return 0;
1152 static void
1153 fdfile_dtor(void *arg, void *obj)
1155 fdfile_t *ff = obj;
1157 mutex_destroy(&ff->ff_lock);
1158 cv_destroy(&ff->ff_closing);
1161 file_t *
1162 fgetdummy(void)
1164 file_t *fp;
1166 fp = kmem_alloc(sizeof(*fp), KM_SLEEP);
1167 if (fp != NULL) {
1168 memset(fp, 0, sizeof(*fp));
1169 mutex_init(&fp->f_lock, MUTEX_DEFAULT, IPL_NONE);
1171 return fp;
1174 void
1175 fputdummy(file_t *fp)
1178 mutex_destroy(&fp->f_lock);
1179 kmem_free(fp, sizeof(*fp));
1183 * Create an initial filedesc structure.
1185 filedesc_t *
1186 fd_init(filedesc_t *fdp)
1188 unsigned fd;
1190 if (fdp == NULL) {
1191 fdp = pool_cache_get(filedesc_cache, PR_WAITOK);
1192 } else {
1193 filedesc_ctor(NULL, fdp, PR_WAITOK);
1196 fdp->fd_refcnt = 1;
1197 fdp->fd_ofiles = fdp->fd_dfiles;
1198 fdp->fd_nfiles = NDFILE;
1199 fdp->fd_himap = fdp->fd_dhimap;
1200 fdp->fd_lomap = fdp->fd_dlomap;
1201 KASSERT(fdp->fd_lastfile == -1);
1202 KASSERT(fdp->fd_lastkqfile == -1);
1203 KASSERT(fdp->fd_knhash == NULL);
1205 memset(&fdp->fd_startzero, 0, sizeof(*fdp) -
1206 offsetof(filedesc_t, fd_startzero));
1207 for (fd = 0; fd < NDFDFILE; fd++) {
1208 fdp->fd_ofiles[fd] = (fdfile_t *)fdp->fd_dfdfile[fd];
1211 return fdp;
1215 * Initialize a file descriptor table.
1217 static int
1218 filedesc_ctor(void *arg, void *obj, int flag)
1220 filedesc_t *fdp = obj;
1221 int i;
1223 memset(fdp, 0, sizeof(*fdp));
1224 mutex_init(&fdp->fd_lock, MUTEX_DEFAULT, IPL_NONE);
1225 fdp->fd_lastfile = -1;
1226 fdp->fd_lastkqfile = -1;
1228 CTASSERT(sizeof(fdp->fd_dfdfile[0]) >= sizeof(fdfile_t));
1229 for (i = 0; i < NDFDFILE; i++) {
1230 fdfile_ctor(NULL, fdp->fd_dfdfile[i], PR_WAITOK);
1233 return 0;
1236 static void
1237 filedesc_dtor(void *arg, void *obj)
1239 filedesc_t *fdp = obj;
1240 int i;
1242 for (i = 0; i < NDFDFILE; i++) {
1243 fdfile_dtor(NULL, fdp->fd_dfdfile[i]);
1246 mutex_destroy(&fdp->fd_lock);
1250 * Make p2 share p1's filedesc structure.
1252 void
1253 fd_share(struct proc *p2)
1255 filedesc_t *fdp;
1257 fdp = curlwp->l_fd;
1258 p2->p_fd = fdp;
1259 atomic_inc_uint(&fdp->fd_refcnt);
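/*
 * Illustrative sketch (not part of the original file): a fork-time
 * caller choosing between sharing and copying the table, keyed on a
 * hypothetical 'share_fdtable' flag.
 *
 *	if (share_fdtable)
 *		fd_share(p2);			p2 references the parent's table
 *	else
 *		p2->p_fd = fd_copy();		p2 receives a private copy
 */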
1263 * Copy a filedesc structure.
1265 filedesc_t *
1266 fd_copy(void)
1268 filedesc_t *newfdp, *fdp;
1269 fdfile_t *ff, *fflist, **ffp, **nffp, *ff2;
1270 int i, nused, numfiles, lastfile, j, newlast;
1271 file_t *fp;
1273 fdp = curproc->p_fd;
1274 newfdp = pool_cache_get(filedesc_cache, PR_WAITOK);
1275 newfdp->fd_refcnt = 1;
1277 KASSERT(newfdp->fd_knhash == NULL);
1278 KASSERT(newfdp->fd_knhashmask == 0);
1279 KASSERT(newfdp->fd_discard == NULL);
1281 for (;;) {
1282 numfiles = fdp->fd_nfiles;
1283 lastfile = fdp->fd_lastfile;
1286 * If the number of open files fits in the internal arrays
1287 		 * of the open file structure, use them; otherwise allocate
1288 * additional memory for the number of descriptors currently
1289 * in use.
1291 if (lastfile < NDFILE) {
1292 i = NDFILE;
1293 newfdp->fd_ofiles = newfdp->fd_dfiles;
1294 } else {
1296 * Compute the smallest multiple of NDEXTENT needed
1297 * for the file descriptors currently in use,
1298 * allowing the table to shrink.
1300 i = numfiles;
1301 while (i >= 2 * NDEXTENT && i > lastfile * 2) {
1302 i /= 2;
1304 newfdp->fd_ofiles = fd_ofile_alloc(i);
1305 KASSERT(i > NDFILE);
1307 if (NDHISLOTS(i) <= NDHISLOTS(NDFILE)) {
1308 newfdp->fd_himap = newfdp->fd_dhimap;
1309 newfdp->fd_lomap = newfdp->fd_dlomap;
1310 } else {
1311 fd_map_alloc(i, &newfdp->fd_lomap,
1312 &newfdp->fd_himap);
1316 * Allocate and string together fdfile structures.
1317 * We abuse fdfile_t::ff_file here, but it will be
1318 * cleared before this routine returns.
1320 nused = fdp->fd_nused;
1321 fflist = NULL;
1322 for (j = nused; j != 0; j--) {
1323 ff = pool_cache_get(fdfile_cache, PR_WAITOK);
1324 ff->ff_file = (void *)fflist;
1325 fflist = ff;
1328 mutex_enter(&fdp->fd_lock);
1329 if (numfiles == fdp->fd_nfiles && nused == fdp->fd_nused &&
1330 lastfile == fdp->fd_lastfile) {
1331 break;
1333 mutex_exit(&fdp->fd_lock);
1334 if (i > NDFILE) {
1335 fd_ofile_free(i, newfdp->fd_ofiles);
1337 if (NDHISLOTS(i) > NDHISLOTS(NDFILE)) {
1338 fd_map_free(i, newfdp->fd_lomap, newfdp->fd_himap);
1340 while (fflist != NULL) {
1341 ff = fflist;
1342 fflist = (void *)ff->ff_file;
1343 ff->ff_file = NULL;
1344 pool_cache_put(fdfile_cache, ff);
1348 newfdp->fd_nfiles = i;
1349 newfdp->fd_freefile = fdp->fd_freefile;
1350 newfdp->fd_exclose = fdp->fd_exclose;
1353 * Clear the entries that will not be copied over.
1354 * Avoid calling memset with 0 size.
1356 if (lastfile < (i-1)) {
1357 memset(newfdp->fd_ofiles + lastfile + 1, 0,
1358 (i - lastfile - 1) * sizeof(file_t **));
1360 if (i < NDENTRIES * NDENTRIES) {
1361 i = NDENTRIES * NDENTRIES; /* size of inlined bitmaps */
1363 memcpy(newfdp->fd_himap, fdp->fd_himap, NDHISLOTS(i)*sizeof(uint32_t));
1364 memcpy(newfdp->fd_lomap, fdp->fd_lomap, NDLOSLOTS(i)*sizeof(uint32_t));
1366 ffp = fdp->fd_ofiles;
1367 nffp = newfdp->fd_ofiles;
1368 j = imax(lastfile, (NDFDFILE - 1));
1369 newlast = -1;
1370 KASSERT(j < fdp->fd_nfiles);
1371 for (i = 0; i <= j; i++, ffp++, *nffp++ = ff2) {
1372 ff = *ffp;
1373 /* Install built-in fdfiles even if unused here. */
1374 if (i < NDFDFILE) {
1375 ff2 = (fdfile_t *)newfdp->fd_dfdfile[i];
1376 } else {
1377 ff2 = NULL;
1379 /* Determine if descriptor is active in parent. */
1380 if (ff == NULL || !fd_isused(fdp, i)) {
1381 KASSERT(ff != NULL || i >= NDFDFILE);
1382 continue;
1384 mutex_enter(&ff->ff_lock);
1385 fp = ff->ff_file;
1386 if (fp == NULL) {
1387 /* Descriptor is half-open: free slot. */
1388 fd_zap(newfdp, i);
1389 mutex_exit(&ff->ff_lock);
1390 continue;
1392 if (fp->f_type == DTYPE_KQUEUE) {
1393 /* kqueue descriptors cannot be copied. */
1394 fd_zap(newfdp, i);
1395 mutex_exit(&ff->ff_lock);
1396 continue;
1398 /* It's active: add a reference to the file. */
1399 mutex_enter(&fp->f_lock);
1400 fp->f_count++;
1401 mutex_exit(&fp->f_lock);
1402 /* Consume one fdfile_t to represent it. */
1403 if (i >= NDFDFILE) {
1404 ff2 = fflist;
1405 fflist = (void *)ff2->ff_file;
1407 ff2->ff_file = fp;
1408 ff2->ff_exclose = ff->ff_exclose;
1409 ff2->ff_allocated = true;
1410 mutex_exit(&ff->ff_lock);
1411 if (i > newlast) {
1412 newlast = i;
1415 mutex_exit(&fdp->fd_lock);
1417 /* Discard unused fdfile_t structures. */
1418 while (__predict_false(fflist != NULL)) {
1419 ff = fflist;
1420 fflist = (void *)ff->ff_file;
1421 ff->ff_file = NULL;
1422 pool_cache_put(fdfile_cache, ff);
1423 nused--;
1425 KASSERT(nused >= 0);
1426 KASSERT(newfdp->fd_ofiles[0] == (fdfile_t *)newfdp->fd_dfdfile[0]);
1428 newfdp->fd_nused = nused;
1429 newfdp->fd_lastfile = newlast;
1431 return (newfdp);
1435 * Release a filedesc structure.
1437 void
1438 fd_free(void)
1440 filedesc_t *fdp;
1441 fdfile_t *ff;
1442 file_t *fp;
1443 int fd, lastfd;
1444 void **discard;
1446 fdp = curlwp->l_fd;
1448 KASSERT(fdp->fd_ofiles[0] == (fdfile_t *)fdp->fd_dfdfile[0]);
1450 if (atomic_dec_uint_nv(&fdp->fd_refcnt) > 0)
1451 return;
1454 * Close any files that the process holds open.
1456 for (fd = 0, lastfd = fdp->fd_nfiles - 1; fd <= lastfd; fd++) {
1457 ff = fdp->fd_ofiles[fd];
1458 KASSERT(fd >= NDFDFILE ||
1459 ff == (fdfile_t *)fdp->fd_dfdfile[fd]);
1460 if ((ff = fdp->fd_ofiles[fd]) == NULL)
1461 continue;
1462 if ((fp = ff->ff_file) != NULL) {
1464 * Must use fd_close() here as kqueue holds
1465 * long term references to descriptors.
1467 ff->ff_refcnt++;
1468 fd_close(fd);
1470 KASSERT(ff->ff_refcnt == 0);
1471 KASSERT(ff->ff_file == NULL);
1472 KASSERT(!ff->ff_exclose);
1473 KASSERT(!ff->ff_allocated);
1474 if (fd >= NDFDFILE) {
1475 pool_cache_put(fdfile_cache, ff);
1480 * Clean out the descriptor table for the next user and return
1481 * to the cache.
1483 while ((discard = fdp->fd_discard) != NULL) {
1484 fdp->fd_discard = discard[0];
1485 kmem_free(discard, (uintptr_t)discard[1]);
1487 if (NDHISLOTS(fdp->fd_nfiles) > NDHISLOTS(NDFILE)) {
1488 KASSERT(fdp->fd_himap != fdp->fd_dhimap);
1489 KASSERT(fdp->fd_lomap != fdp->fd_dlomap);
1490 fd_map_free(fdp->fd_nfiles, fdp->fd_lomap, fdp->fd_himap);
1492 if (fdp->fd_nfiles > NDFILE) {
1493 KASSERT(fdp->fd_ofiles != fdp->fd_dfiles);
1494 fd_ofile_free(fdp->fd_nfiles, fdp->fd_ofiles);
1496 if (fdp->fd_knhash != NULL) {
1497 hashdone(fdp->fd_knhash, HASH_LIST, fdp->fd_knhashmask);
1498 fdp->fd_knhash = NULL;
1499 fdp->fd_knhashmask = 0;
1500 } else {
1501 KASSERT(fdp->fd_knhashmask == 0);
1503 fdp->fd_lastkqfile = -1;
1504 pool_cache_put(filedesc_cache, fdp);
1508 * File Descriptor pseudo-device driver (/dev/fd/).
1510 * Opening minor device N dup()s the file (if any) connected to file
1511 * descriptor N belonging to the calling process. Note that this driver
1512 * consists of only the ``open()'' routine, because all subsequent
1513 * references to this file will be direct to the other driver.
1515 static int
1516 filedescopen(dev_t dev, int mode, int type, lwp_t *l)
1520 	 * XXX Kludge: set dupfd to contain the value of the
1521 	 * file descriptor being sought for duplication. The error
1522 * return ensures that the vnode for this device will be released
1523 * by vn_open. Open will detect this special error and take the
1524 * actions in dupfdopen below. Other callers of vn_open or VOP_OPEN
1525 * will simply report the error.
1527 l->l_dupfd = minor(dev); /* XXX */
1528 return EDUPFD;
1532 * Duplicate the specified descriptor to a free descriptor.
1535 fd_dupopen(int old, int *new, int mode, int error)
1537 filedesc_t *fdp;
1538 fdfile_t *ff;
1539 file_t *fp;
1541 if ((fp = fd_getfile(old)) == NULL) {
1542 return EBADF;
1544 fdp = curlwp->l_fd;
1545 ff = fdp->fd_ofiles[old];
1548 * There are two cases of interest here.
1550 	 * For EDUPFD, simply dup 'old' to a free file descriptor
1551 	 * ('*new') and return.
1553 	 * For EMOVEFD, steal away the file structure from 'old' and
1554 	 * store it in '*new'.  'old' is effectively closed by
1555 * this operation.
1557 * Any other error code is just returned.
1559 switch (error) {
1560 case EDUPFD:
1562 * Check that the mode the file is being opened for is a
1563 * subset of the mode of the existing descriptor.
1565 if (((mode & (FREAD|FWRITE)) | fp->f_flag) != fp->f_flag) {
1566 error = EACCES;
1567 break;
1570 /* Copy it. */
1571 error = fd_dup(fp, 0, new, fdp->fd_ofiles[old]->ff_exclose);
1572 break;
1574 case EMOVEFD:
1575 /* Copy it. */
1576 error = fd_dup(fp, 0, new, fdp->fd_ofiles[old]->ff_exclose);
1577 if (error != 0) {
1578 break;
1581 /* Steal away the file pointer from 'old'. */
1582 (void)fd_close(old);
1583 return 0;
1586 fd_putfile(old);
1587 return error;
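/*
 * Illustrative flow (not part of the original file): how an open of
 * /dev/fd/N is expected to resolve, based on the comments above.
 *
 *	open("/dev/fd/3", ...)
 *	  -> filedescopen() sets l_dupfd = 3 and returns EDUPFD
 *	  -> the open path catches EDUPFD and calls
 *	     fd_dupopen(3, &new, mode, EDUPFD)
 *	  -> fd_dupopen() duplicates descriptor 3 into '*new' via fd_dup()
 */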
1591 	 * Set the descriptor owner. If the owner is a process, 'pgid'
1592 	 * is set to a positive value: the process ID. If the owner is a
1593 	 * process group, 'pgid' is set to the negated group ID (-pg_id).
1596 fsetown(pid_t *pgid, u_long cmd, const void *data)
1598 int id = *(const int *)data;
1599 int error;
1601 switch (cmd) {
1602 case TIOCSPGRP:
1603 if (id < 0)
1604 return (EINVAL);
1605 id = -id;
1606 break;
1607 default:
1608 break;
1611 if (id > 0 && !pfind(id))
1612 return (ESRCH);
1613 else if (id < 0 && (error = pgid_in_session(curproc, -id)))
1614 return (error);
1616 *pgid = id;
1617 return (0);
1621 	 * Return descriptor owner information. If the value is positive,
1622 	 * it is a process ID. If it is negative, it is a process group ID
1623 	 * and the sign must be removed before use.
1626 fgetown(pid_t pgid, u_long cmd, void *data)
1629 switch (cmd) {
1630 case TIOCGPGRP:
1631 *(int *)data = -pgid;
1632 break;
1633 default:
1634 *(int *)data = pgid;
1635 break;
1637 return (0);
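/*
 * Illustrative examples (not part of the original file) of the sign
 * convention implemented by fsetown()/fgetown(); FIOSETOWN/FIOGETOWN
 * are assumed here to be the commands handled by the default cases.
 *
 *	FIOSETOWN  123	->  pgid =  123		owner is process 123
 *	FIOSETOWN -123	->  pgid = -123		owner is process group 123
 *	TIOCSPGRP  123	->  pgid = -123		stored negated
 *	FIOGETOWN	->  returns pgid as stored
 *	TIOCGPGRP	->  returns -pgid
 */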
1641 * Send signal to descriptor owner, either process or process group.
1643 void
1644 fownsignal(pid_t pgid, int signo, int code, int band, void *fdescdata)
1646 ksiginfo_t ksi;
1648 KASSERT(!cpu_intr_p());
1650 if (pgid == 0) {
1651 return;
1654 KSI_INIT(&ksi);
1655 ksi.ksi_signo = signo;
1656 ksi.ksi_code = code;
1657 ksi.ksi_band = band;
1659 mutex_enter(proc_lock);
1660 if (pgid > 0) {
1661 struct proc *p1;
1663 p1 = p_find(pgid, PFIND_LOCKED);
1664 if (p1 != NULL) {
1665 kpsignal(p1, &ksi, fdescdata);
1667 } else {
1668 struct pgrp *pgrp;
1670 KASSERT(pgid < 0);
1671 pgrp = pg_find(-pgid, PFIND_LOCKED);
1672 if (pgrp != NULL) {
1673 kpgsignal(pgrp, &ksi, fdescdata, 0);
1676 mutex_exit(proc_lock);
1680 fd_clone(file_t *fp, unsigned fd, int flag, const struct fileops *fops,
1681 void *data)
1684 fp->f_flag = flag;
1685 fp->f_type = DTYPE_MISC;
1686 fp->f_ops = fops;
1687 fp->f_data = data;
1688 curlwp->l_dupfd = fd;
1689 fd_affix(curproc, fp, fd);
1691 return EMOVEFD;
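/*
 * Illustrative sketch (not part of the original file): a cloning
 * device open routine built on fd_allocfile() and fd_clone();
 * example_fileops and struct example_softc are hypothetical.
 *
 *	static int
 *	exampleopen(dev_t dev, int flag, int mode, lwp_t *l)
 *	{
 *		struct example_softc *sc;
 *		file_t *fp;
 *		int error, fd;
 *
 *		if ((error = fd_allocfile(&fp, &fd)) != 0)
 *			return error;
 *		sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
 *		return fd_clone(fp, fd, flag, &example_fileops, sc);
 *	}
 */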
1695 fnullop_fcntl(file_t *fp, u_int cmd, void *data)
1698 if (cmd == F_SETFL)
1699 return 0;
1701 return EOPNOTSUPP;
1705 fnullop_poll(file_t *fp, int which)
1708 return 0;
1712 fnullop_kqfilter(file_t *fp, struct knote *kn)
1715 return 0;
1718 void
1719 fnullop_drain(file_t *fp)
1725 fbadop_read(file_t *fp, off_t *offset, struct uio *uio,
1726 kauth_cred_t cred, int flags)
1729 return EOPNOTSUPP;
1733 fbadop_write(file_t *fp, off_t *offset, struct uio *uio,
1734 kauth_cred_t cred, int flags)
1737 return EOPNOTSUPP;
1741 fbadop_ioctl(file_t *fp, u_long com, void *data)
1744 return EOPNOTSUPP;
1748 fbadop_stat(file_t *fp, struct stat *sb)
1751 return EOPNOTSUPP;
1755 fbadop_close(file_t *fp)
1758 return EOPNOTSUPP;