/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 * $FreeBSD: src/sys/kern/vfs_subr.c,v 1.249.2.30 2003/04/04 20:35:57 tegge Exp $
 * $DragonFly: src/sys/kern/vfs_subr.c,v 1.118 2008/09/17 21:44:18 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dirent.h>
#include <sys/domain.h>
#include <sys/eventhandler.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/reboot.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/unistd.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <machine/limits.h>

#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_zone.h>

#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>
static MALLOC_DEFINE(M_NETADDR, "Export Host", "Export host address structure");
SYSCTL_INT(_debug, OID_AUTO, numvnodes, CTLFLAG_RD, &numvnodes, 0, "");
SYSCTL_INT(_vfs, OID_AUTO, fastdev, CTLFLAG_RW, &vfs_fastdev, 0, "");
enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
static int reassignbufcalls;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufcalls, CTLFLAG_RW,
	   &reassignbufcalls, 0, "");
static int reassignbufloops;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufloops, CTLFLAG_RW,
	   &reassignbufloops, 0, "");
static int reassignbufsortgood;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortgood, CTLFLAG_RW,
	   &reassignbufsortgood, 0, "");
static int reassignbufsortbad;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufsortbad, CTLFLAG_RW,
	   &reassignbufsortbad, 0, "");
static int reassignbufmethod = 1;
SYSCTL_INT(_vfs, OID_AUTO, reassignbufmethod, CTLFLAG_RW,
	   &reassignbufmethod, 0, "");
int	nfs_mount_type = -1;
static struct lwkt_token spechash_token;
struct nfs_public nfs_pub;	/* publicly exported FS */
SYSCTL_INT(_kern, KERN_MAXVNODES, maxvnodes, CTLFLAG_RW,
	   &desiredvnodes, 0, "Maximum number of vnodes");
static void	vfs_free_addrlist (struct netexport *nep);
static int	vfs_free_netcred (struct radix_node *rn, void *w);
static int	vfs_hang_addrlist (struct mount *mp, struct netexport *nep,
				   const struct export_args *argp);
/*
 * Red black tree functions
 */
static int rb_buf_compare(struct buf *b1, struct buf *b2);
RB_GENERATE2(buf_rb_tree, buf, b_rbnode, rb_buf_compare, off_t, b_loffset);
RB_GENERATE2(buf_rb_hash, buf, b_rbhash, rb_buf_compare, off_t, b_loffset);
static int
rb_buf_compare(struct buf *b1, struct buf *b2)
{
	if (b1->b_loffset < b2->b_loffset)
		return(-1);
	if (b1->b_loffset > b2->b_loffset)
		return(1);
	return(0);
}
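/*
 * Usage sketch (illustrative, not part of the original source): with a
 * comparator like the one above, RB_SCAN() can visit every buffer at or
 * beyond a given file offset.  "example_cmp" and "example_callback" are
 * hypothetical names; buf_rb_tree and v_rbclean_tree are the real ones
 * generated and used in this file.
 *
 *	static int
 *	example_cmp(struct buf *bp, void *data)
 *	{
 *		return((bp->b_loffset >= *(off_t *)data) ? 0 : -1);
 *	}
 *
 *	static int
 *	example_callback(struct buf *bp, void *data)
 *	{
 *		return(0);
 *	}
 *
 *	off_t loffset = 0;
 *	RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree, example_cmp,
 *		example_callback, &loffset);
 */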
/*
 * Returns non-zero if the vnode is a candidate for lazy msyncing.
 */
static __inline int
vshouldmsync(struct vnode *vp)
{
	if (vp->v_auxrefs != 0 || vp->v_sysref.refcnt > 0)
		return (0);		/* other holders */
	if (vp->v_object &&
	   (vp->v_object->ref_count || vp->v_object->resident_page_count)) {
		return (0);
	}
	return (1);
}
/*
 * Initialize the vnode management data structures.
 *
 * Called from vfsinit()
 */
void
vfs_subr_init(void)
{
	int factor1;
	int factor2;

	/*
	 * Desiredvnodes is kern.maxvnodes.  We want to scale it
	 * according to available system memory but we may also have
	 * to limit it based on available KVM, which is capped on 32 bit
	 * systems.
	 *
	 * WARNING!  For machines with 64-256M of ram we have to be sure
	 *	     that the default limit scales down well due to HAMMER
	 *	     taking up significantly more memory per-vnode vs UFS.
	 *	     We want around ~5800 on a 128M machine.
	 */
	factor1 = 20 * (sizeof(struct vm_object) + sizeof(struct vnode));
	factor2 = 22 * (sizeof(struct vm_object) + sizeof(struct vnode));
	desiredvnodes =
		imin((int64_t)vmstats.v_page_count * PAGE_SIZE / factor1,
		     KvaSize / factor2);
	desiredvnodes = imax(desiredvnodes, maxproc * 8);

	lwkt_token_init(&spechash_token);
}
/*
 * Knob to control the precision of file timestamps:
 *
 *   0 = seconds only; nanoseconds zeroed.
 *   1 = seconds and nanoseconds, accurate within 1/HZ.
 *   2 = seconds and nanoseconds, truncated to microseconds.
 *   >=3 = seconds and nanoseconds, maximum precision.
 */
enum { TSP_SEC, TSP_HZ, TSP_USEC, TSP_NSEC };

static int timestamp_precision = TSP_SEC;
SYSCTL_INT(_vfs, OID_AUTO, timestamp_precision, CTLFLAG_RW,
	   &timestamp_precision, 0, "");
/*
 * Get a current timestamp.
 */
void
vfs_timestamp(struct timespec *tsp)
{
	struct timeval tv;

	switch (timestamp_precision) {
	case TSP_SEC:
		tsp->tv_sec = time_second;
		tsp->tv_nsec = 0;
		break;
	case TSP_HZ:
		getnanotime(tsp);
		break;
	case TSP_USEC:
		microtime(&tv);
		TIMEVAL_TO_TIMESPEC(&tv, tsp);
		break;
	case TSP_NSEC:
	default:
		nanotime(tsp);
		break;
	}
}
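/*
 * Usage sketch (illustrative, not part of the original source): a
 * filesystem updating an inode's modification time would typically do
 * something like the following, where "ip" and its fields are
 * hypothetical names:
 *
 *	struct timespec ts;
 *
 *	vfs_timestamp(&ts);
 *	ip->i_mtime = ts.tv_sec;
 *	ip->i_mtimensec = ts.tv_nsec;
 */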
/*
 * Set vnode attributes to VNOVAL
 */
void
vattr_null(struct vattr *vap)
{
	vap->va_type = VNON;
	vap->va_size = VNOVAL;
	vap->va_bytes = VNOVAL;
	vap->va_mode = VNOVAL;
	vap->va_nlink = VNOVAL;
	vap->va_uid = VNOVAL;
	vap->va_gid = VNOVAL;
	vap->va_fsid = VNOVAL;
	vap->va_fileid = VNOVAL;
	vap->va_blocksize = VNOVAL;
	vap->va_rmajor = VNOVAL;
	vap->va_rminor = VNOVAL;
	vap->va_atime.tv_sec = VNOVAL;
	vap->va_atime.tv_nsec = VNOVAL;
	vap->va_mtime.tv_sec = VNOVAL;
	vap->va_mtime.tv_nsec = VNOVAL;
	vap->va_ctime.tv_sec = VNOVAL;
	vap->va_ctime.tv_nsec = VNOVAL;
	vap->va_flags = VNOVAL;
	vap->va_gen = VNOVAL;
	vap->va_vaflags = 0;
	/* va_*_uuid fields are only valid if related flags are set */
}
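/*
 * Usage sketch (illustrative, not part of the original source): a
 * VOP_GETATTR implementation first resets the vattr so any field it does
 * not fill reads back as VNOVAL.  "myfs_getattr" and the constant values
 * are hypothetical.
 *
 *	static int
 *	myfs_getattr(struct vop_getattr_args *ap)
 *	{
 *		struct vattr *vap = ap->a_vap;
 *
 *		vattr_null(vap);
 *		vap->va_type = VREG;
 *		vap->va_mode = 0644;
 *		vap->va_size = 0;
 *		return (0);
 *	}
 */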
/*
 * Flush out and invalidate all buffers associated with a vnode.
 *
 * vp must be locked.
 */
static int vinvalbuf_bp(struct buf *bp, void *data);

struct vinvalbuf_bp_info {
	struct vnode *vp;
	int slptimeo;
	int lkflags;
	int flags;
};

int
vinvalbuf(struct vnode *vp, int flags, int slpflag, int slptimeo)
{
	struct vinvalbuf_bp_info info;
	vm_object_t object;
	lwkt_tokref vlock;
	int error;

	lwkt_gettoken(&vlock, &vp->v_token);

	/*
	 * If we are being asked to save, call fsync to ensure that the inode
	 * is updated.
	 */
	if (flags & V_SAVE) {
		error = bio_track_wait(&vp->v_track_write, slpflag, slptimeo);
		if (error)
			goto done;
		if (!RB_EMPTY(&vp->v_rbdirty_tree)) {
			if ((error = VOP_FSYNC(vp, MNT_WAIT, 0)) != 0)
				goto done;

			/*
			 * Dirty bufs may be left or generated via races
			 * in circumstances where vinvalbuf() is called on
			 * a vnode not undergoing reclamation.  Only
			 * panic if we are trying to reclaim the vnode.
			 */
			if ((vp->v_flag & VRECLAIMED) &&
			    (bio_track_active(&vp->v_track_write) ||
			     !RB_EMPTY(&vp->v_rbdirty_tree))) {
				panic("vinvalbuf: dirty bufs");
			}
		}
	}
	info.slptimeo = slptimeo;
	info.lkflags = LK_EXCLUSIVE | LK_SLEEPFAIL;
	if (slpflag & PCATCH)
		info.lkflags |= LK_PCATCH;
	info.flags = flags;
	info.vp = vp;

	/*
	 * Flush the buffer cache until nothing is left.
	 */
	while (!RB_EMPTY(&vp->v_rbclean_tree) ||
	       !RB_EMPTY(&vp->v_rbdirty_tree)) {
		error = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree, NULL,
				vinvalbuf_bp, &info);
		if (error == 0) {
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
					vinvalbuf_bp, &info);
		}
	}

	/*
	 * Wait for I/O completion.  We may block in the pip code so we have
	 * to re-check.
	 */
	do {
		bio_track_wait(&vp->v_track_write, 0, 0);
		if ((object = vp->v_object) != NULL) {
			while (object->paging_in_progress)
				vm_object_pip_sleep(object, "vnvlbx");
		}
	} while (bio_track_active(&vp->v_track_write));

	/*
	 * Destroy the copy in the VM cache, too.
	 */
	if ((object = vp->v_object) != NULL) {
		vm_object_page_remove(object, 0, 0,
				      (flags & V_SAVE) ? TRUE : FALSE);
	}

	if (!RB_EMPTY(&vp->v_rbdirty_tree) || !RB_EMPTY(&vp->v_rbclean_tree))
		panic("vinvalbuf: flush failed");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("vinvalbuf: flush failed, buffers still present");
	error = 0;
done:
	lwkt_reltoken(&vlock);
	return (error);
}
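/*
 * Usage sketch (illustrative, not part of the original source): a
 * filesystem tearing down a vnode flushes and discards its buffers.
 * V_SAVE writes dirty buffers back first; passing 0 discards them.
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = vinvalbuf(vp, V_SAVE, 0, 0);
 *	vn_unlock(vp);
 */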
static int
vinvalbuf_bp(struct buf *bp, void *data)
{
	struct vinvalbuf_bp_info *info = data;
	int error;

	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		error = BUF_TIMELOCK(bp, info->lkflags,
				     "vinvalbuf", info->slptimeo);
		if (error == 0) {
			BUF_UNLOCK(bp);
			error = ENOLCK;
		}
		if (error == ENOLCK)
			return(0);
		return (-error);
	}

	KKASSERT(bp->b_vp == info->vp);

	/*
	 * XXX Since there are no node locks for NFS, I
	 * believe there is a slight chance that a delayed
	 * write will occur while sleeping just above, so
	 * check for it.  Note that vfs_bio_awrite expects
	 * buffers to reside on a queue, while bwrite() and
	 * brelse() do not.
	 *
	 * NOTE:  NO B_LOCKED CHECK.  Also no buf_checkwrite()
	 * check.  This code will write out the buffer, period.
	 */
	if (((bp->b_flags & (B_DELWRI | B_INVAL)) == B_DELWRI) &&
	    (info->flags & V_SAVE)) {
		if (bp->b_vp == info->vp) {
			if (bp->b_flags & B_CLUSTEROK) {
				vfs_bio_awrite(bp);
			} else {
				bremfree(bp);
				bawrite(bp);
			}
		} else {
			bremfree(bp);
			bwrite(bp);
		}
	} else if (info->flags & V_SAVE) {
		/*
		 * Cannot set B_NOCACHE on a clean buffer as this will
		 * destroy the VM backing store which might actually
		 * be dirty (and unsynchronized).
		 */
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF);
		brelse(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_NOCACHE | B_RELBUF);
		brelse(bp);
	}
	return(0);
}
/*
 * Truncate a file's buffer and pages to a specified length.  This
 * is in lieu of the old vinvalbuf mechanism, which performed unneeded
 * sync activity.
 *
 * The vnode must be locked.
 */
static int vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_trunc(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data);
static int vtruncbuf_bp_metasync(struct buf *bp, void *data);
int
vtruncbuf(struct vnode *vp, off_t length, int blksize)
{
	off_t truncloffset;
	const char *filename;
	lwkt_tokref vlock;
	int count;

	/*
	 * Round up to the *next* block, then destroy the buffers in question.
	 * Since we are only removing some of the buffers we must rely on the
	 * scan count to determine whether a loop is necessary.
	 */
	if ((count = (int)(length % blksize)) != 0)
		truncloffset = length + (blksize - count);
	else
		truncloffset = length;

	lwkt_gettoken(&vlock, &vp->v_token);
	do {
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
	} while(count);

	/*
	 * For safety, fsync any remaining metadata if the file is not being
	 * truncated to 0.  Since the metadata does not represent the entire
	 * dirty list we have to rely on the hit count to ensure that we get
	 * all of it.
	 */
	if (length > 0) {
		do {
			count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					vtruncbuf_bp_metasync_cmp,
					vtruncbuf_bp_metasync, vp);
		} while (count);
	}

	/*
	 * Clean out any left over VM backing store.
	 *
	 * It is possible to have in-progress I/O from buffers that were
	 * not part of the truncation.  This should not happen if we
	 * are truncating to 0-length.
	 */
	vnode_pager_setsize(vp, length);
	bio_track_wait(&vp->v_track_write, 0, 0);

	spin_lock_wr(&vp->v_spinlock);
	filename = TAILQ_FIRST(&vp->v_namecache) ?
		   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
	spin_unlock_wr(&vp->v_spinlock);

	/*
	 * Make sure no buffers were instantiated while we were trying
	 * to clean out the remaining VM pages.  This could occur due
	 * to busy dirty VM pages being flushed out to disk.
	 */
	do {
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vtruncbuf_bp_trunc_cmp,
				vtruncbuf_bp_trunc, &truncloffset);
		if (count) {
			kprintf("Warning: vtruncbuf(): Had to re-clean %d "
			       "left over buffers in %s\n", count, filename);
		}
	} while(count);

	lwkt_reltoken(&vlock);

	return (0);
}
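/*
 * Usage sketch (illustrative, not part of the original source): a
 * filesystem's truncate path shrinks the file and calls vtruncbuf()
 * with the file's block size so partially covered buffers are rounded
 * correctly.  "newsize" and "MYFS_BSIZE" are hypothetical names.
 *
 *	error = vtruncbuf(vp, newsize, MYFS_BSIZE);
 */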
/*
 * The callback buffer is beyond the new file EOF and must be destroyed.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static int
vtruncbuf_bp_trunc_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset >= *(off_t *)data)
		return(0);
	return(-1);
}
static int
vtruncbuf_bp_trunc(struct buf *bp, void *data)
{
	/*
	 * Do not try to use a buffer we cannot immediately lock, but sleep
	 * anyway to prevent a livelock.  The code will loop until all buffers
	 * can be acted upon.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF | B_NOCACHE);
		brelse(bp);
	}
	return(1);
}
/*
 * Fsync all meta-data after truncating a file to be non-zero.  Only metadata
 * blocks (with a negative loffset) are scanned.
 * Note that the compare function must conform to the RB_SCAN's requirements.
 */
static int
vtruncbuf_bp_metasync_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}
static int
vtruncbuf_bp_metasync(struct buf *bp, void *data)
{
	struct vnode *vp = data;

	if (bp->b_flags & B_DELWRI) {
		/*
		 * Do not try to use a buffer we cannot immediately lock,
		 * but sleep anyway to prevent a livelock.  The code will
		 * loop until all buffers can be acted upon.
		 */
		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
			if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
				BUF_UNLOCK(bp);
		} else {
			bremfree(bp);
			if (bp->b_vp == vp)
				bawrite(bp);
			else
				bwrite(bp);
		}
		return(1);
	}
	return(0);
}
/*
 * vfsync - implements a multipass fsync on a file which understands
 * dependencies and meta-data.  The passed vnode must be locked.  The
 * waitfor argument may be MNT_WAIT or MNT_NOWAIT, or MNT_LAZY.
 *
 * When fsyncing data asynchronously just do one consolidated pass starting
 * with the most negative block number.  This may not get all the data due
 * to dependencies.
 *
 * When fsyncing data synchronously do a data pass, then a metadata pass,
 * then do additional data+metadata passes to try to get all the data out.
 */
static int vfsync_wait_output(struct vnode *vp,
		    int (*waitoutput)(struct vnode *, struct thread *));
static int vfsync_data_only_cmp(struct buf *bp, void *data);
static int vfsync_meta_only_cmp(struct buf *bp, void *data);
static int vfsync_lazy_range_cmp(struct buf *bp, void *data);
static int vfsync_bp(struct buf *bp, void *data);

struct vfsync_info {
	struct vnode *vp;
	int synchronous;
	int syncdeps;
	int lazycount;
	int lazylimit;
	int skippedbufs;
	int (*checkdef)(struct buf *);
};
int
vfsync(struct vnode *vp, int waitfor, int passes,
	int (*checkdef)(struct buf *),
	int (*waitoutput)(struct vnode *, struct thread *))
{
	struct vfsync_info info;
	lwkt_tokref vlock;
	int error;

	bzero(&info, sizeof(info));
	info.vp = vp;
	if ((info.checkdef = checkdef) == NULL)
		info.syncdeps = 1;

	lwkt_gettoken(&vlock, &vp->v_token);

	switch(waitfor) {
	case MNT_LAZY:
		/*
		 * Lazy (filesystem syncer type) Asynchronous plus limit the
		 * number of data (not meta) pages we try to flush to 1MB.
		 * A non-zero return means that lazy limit was reached.
		 */
		info.lazylimit = 1024 * 1024;
		info.syncdeps = 1;
		error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				vfsync_lazy_range_cmp, vfsync_bp, &info);
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
			vfsync_meta_only_cmp, vfsync_bp, &info);
		if (error == 0)
			vp->v_lazyw = 0;
		else if (!RB_EMPTY(&vp->v_rbdirty_tree))
			vn_syncer_add_to_worklist(vp, 1);
		error = 0;
		break;
	case MNT_NOWAIT:
		/*
		 * Asynchronous.  Do a data-only pass and a meta-only pass.
		 */
		info.syncdeps = 1;
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_meta_only_cmp,
			vfsync_bp, &info);
		error = 0;
		break;
	default:
		/*
		 * Synchronous.  Do a data-only pass, then a meta-data+data
		 * pass, then additional integrated passes to try to get
		 * all the dependencies flushed.
		 */
		RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, vfsync_data_only_cmp,
			vfsync_bp, &info);
		error = vfsync_wait_output(vp, waitoutput);
		if (error == 0) {
			info.skippedbufs = 0;
			RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
				vfsync_bp, &info);
			error = vfsync_wait_output(vp, waitoutput);
			if (info.skippedbufs)
				kprintf("Warning: vfsync skipped %d dirty bufs in pass2!\n", info.skippedbufs);
		}
		while (error == 0 && passes > 0 &&
		       !RB_EMPTY(&vp->v_rbdirty_tree)
		) {
			if (--passes == 0) {
				info.synchronous = 1;
				info.syncdeps = 1;
			}
			error = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree, NULL,
					vfsync_bp, &info);
			if (error < 0)
				error = -error;
			info.syncdeps = 1;
			if (error == 0)
				error = vfsync_wait_output(vp, waitoutput);
		}
		break;
	}
	lwkt_reltoken(&vlock);
	return(error);
}
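/*
 * Usage sketch (illustrative, not part of the original source): a simple
 * VOP_FSYNC implementation can delegate the multi-pass flush entirely to
 * vfsync().  A NULL checkdef callback tells vfsync() there are no
 * dependencies to worry about; "myfs_fsync" is a hypothetical name.
 *
 *	static int
 *	myfs_fsync(struct vop_fsync_args *ap)
 *	{
 *		return (vfsync(ap->a_vp, ap->a_waitfor, 1, NULL, NULL));
 *	}
 */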
static int
vfsync_wait_output(struct vnode *vp,
		   int (*waitoutput)(struct vnode *, struct thread *))
{
	int error;

	error = bio_track_wait(&vp->v_track_write, 0, 0);
	if (waitoutput)
		error = waitoutput(vp, curthread);
	return(error);
}
static int
vfsync_data_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(-1);
	return(0);
}
static int
vfsync_meta_only_cmp(struct buf *bp, void *data)
{
	if (bp->b_loffset < 0)
		return(0);
	return(1);
}
static int
vfsync_lazy_range_cmp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;

	if (bp->b_loffset < info->vp->v_lazyw)
		return(-1);
	return(0);
}
static int
vfsync_bp(struct buf *bp, void *data)
{
	struct vfsync_info *info = data;
	struct vnode *vp = info->vp;
	int error;

	/*
	 * if syncdeps is not set we do not try to write buffers which have
	 * dependencies.
	 */
	if (!info->synchronous && info->syncdeps == 0 && info->checkdef(bp))
		return(0);

	/*
	 * Ignore buffers that we cannot immediately lock.  XXX
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		kprintf("Warning: vfsync_bp skipping dirty buffer %p\n", bp);
		++info->skippedbufs;
		return(0);
	}
	if ((bp->b_flags & B_DELWRI) == 0)
		panic("vfsync_bp: buffer not dirty");
	if (bp->b_vp != vp)
		panic("vfsync_bp: buffer vp mismatch");

	/*
	 * B_NEEDCOMMIT (primarily used by NFS) is a state where the buffer
	 * has been written but an additional handshake with the device
	 * is required before we can dispose of the buffer.  We have no idea
	 * how to do this so we have to skip these buffers.
	 */
	if (bp->b_flags & B_NEEDCOMMIT) {
		BUF_UNLOCK(bp);
		return(0);
	}

	/*
	 * Ask bioops if it is ok to sync
	 */
	if (LIST_FIRST(&bp->b_dep) != NULL && buf_checkwrite(bp)) {
		bremfree(bp);
		brelse(bp);
		return(0);
	}

	if (info->synchronous) {
		/*
		 * Synchronous flushing.  An error may be returned.
		 */
		bremfree(bp);
		error = bwrite(bp);
	} else {
		/*
		 * Asynchronous flushing.  A negative return value simply
		 * stops the scan and is not considered an error.  We use
		 * this to support limited MNT_LAZY flushes.
		 */
		vp->v_lazyw = bp->b_loffset;
		if ((vp->v_flag & VOBJBUF) && (bp->b_flags & B_CLUSTEROK)) {
			info->lazycount += vfs_bio_awrite(bp);
		} else {
			info->lazycount += bp->b_bufsize;
			bremfree(bp);
			bawrite(bp);
		}
		if (info->lazylimit && info->lazycount >= info->lazylimit)
			error = 1;
		else
			error = 0;
	}
	return(-error);
}
/*
 * Associate a buffer with a vnode.
 */
int
bgetvp(struct vnode *vp, struct buf *bp)
{
	lwkt_tokref vlock;

	KASSERT(bp->b_vp == NULL, ("bgetvp: not free"));
	KKASSERT((bp->b_flags & (B_HASHED|B_DELWRI|B_VNCLEAN|B_VNDIRTY)) == 0);

	vhold(vp);

	/*
	 * Insert onto list for new vnode.
	 */
	lwkt_gettoken(&vlock, &vp->v_token);
	if (buf_rb_hash_RB_INSERT(&vp->v_rbhash_tree, bp)) {
		lwkt_reltoken(&vlock);
		vdrop(vp);
		return (EEXIST);
	}
	bp->b_vp = vp;
	bp->b_flags |= B_HASHED;
	bp->b_flags |= B_VNCLEAN;
	if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp))
		panic("reassignbuf: dup lblk/clean vp %p bp %p", vp, bp);
	lwkt_reltoken(&vlock);
	return(0);
}
/*
 * Disassociate a buffer from a vnode.
 */
void
brelvp(struct buf *bp)
{
	struct vnode *vp;
	lwkt_tokref vlock;

	KASSERT(bp->b_vp != NULL, ("brelvp: NULL"));

	/*
	 * Delete from old vnode list, if on one.
	 */
	vp = bp->b_vp;
	lwkt_gettoken(&vlock, &vp->v_token);
	if (bp->b_flags & (B_VNDIRTY | B_VNCLEAN)) {
		if (bp->b_flags & B_VNDIRTY)
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
		else
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
		bp->b_flags &= ~(B_VNDIRTY | B_VNCLEAN);
	}
	if (bp->b_flags & B_HASHED) {
		buf_rb_hash_RB_REMOVE(&vp->v_rbhash_tree, bp);
		bp->b_flags &= ~B_HASHED;
	}
	if ((vp->v_flag & VONWORKLST) && RB_EMPTY(&vp->v_rbdirty_tree)) {
		vclrflags(vp, VONWORKLST);
		LIST_REMOVE(vp, v_synclist);
	}
	bp->b_vp = NULL;
	lwkt_reltoken(&vlock);

	vdrop(vp);
}
/*
 * Reassign the buffer to the proper clean/dirty list based on B_DELWRI.
 * This routine is called when the state of the B_DELWRI bit is changed.
 */
void
reassignbuf(struct buf *bp)
{
	struct vnode *vp = bp->b_vp;
	lwkt_tokref vlock;
	int delay;

	KKASSERT(vp != NULL);
	++reassignbufcalls;

	/*
	 * B_PAGING flagged buffers cannot be reassigned because their vp
	 * is not fully linked in.
	 */
	if (bp->b_flags & B_PAGING)
		panic("cannot reassign paging buffer");

	lwkt_gettoken(&vlock, &vp->v_token);
	if (bp->b_flags & B_DELWRI) {
		/*
		 * Move to the dirty list, add the vnode to the worklist
		 */
		if (bp->b_flags & B_VNCLEAN) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbclean_tree, bp);
			bp->b_flags &= ~B_VNCLEAN;
		}
		if ((bp->b_flags & B_VNDIRTY) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbdirty_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNDIRTY;
		}
		if ((vp->v_flag & VONWORKLST) == 0) {
			switch (vp->v_type) {
			case VDIR:
				delay = dirdelay;
				break;
			case VCHR:
			case VBLK:
				if (vp->v_rdev &&
				    vp->v_rdev->si_mountpoint != NULL) {
					delay = metadelay;
					break;
				}
				/* fall through */
			default:
				delay = filedelay;
			}
			vn_syncer_add_to_worklist(vp, delay);
		}
	} else {
		/*
		 * Move to the clean list, remove the vnode from the worklist
		 * if no dirty blocks remain.
		 */
		if (bp->b_flags & B_VNDIRTY) {
			buf_rb_tree_RB_REMOVE(&vp->v_rbdirty_tree, bp);
			bp->b_flags &= ~B_VNDIRTY;
		}
		if ((bp->b_flags & B_VNCLEAN) == 0) {
			if (buf_rb_tree_RB_INSERT(&vp->v_rbclean_tree, bp)) {
				panic("reassignbuf: dup lblk vp %p bp %p",
				      vp, bp);
			}
			bp->b_flags |= B_VNCLEAN;
		}
		if ((vp->v_flag & VONWORKLST) &&
		    RB_EMPTY(&vp->v_rbdirty_tree)) {
			vclrflags(vp, VONWORKLST);
			LIST_REMOVE(vp, v_synclist);
		}
	}
	lwkt_reltoken(&vlock);
}
/*
 * Create a vnode for a block device.
 * Used for mounting the root file system.
 */
extern struct vop_ops *devfs_vnode_dev_vops_p;

int
bdevvp(cdev_t dev, struct vnode **vpp)
{
	struct vnode *vp;
	struct vnode *nvp;
	int error;

	if (dev == NULL) {
		*vpp = NULLVP;
		return (ENXIO);
	}
	error = getspecialvnode(VT_NON, NULL, &devfs_vnode_dev_vops_p,
				&nvp, 0, 0);
	if (error) {
		*vpp = NULLVP;
		return (error);
	}
	vp = nvp;
	vp->v_type = VCHR;
	v_associate_rdev(vp, dev);
	vp->v_umajor = dev->si_umajor;
	vp->v_uminor = dev->si_uminor;
	*vpp = vp;
	return (0);
}
int
v_associate_rdev(struct vnode *vp, cdev_t dev)
{
	lwkt_tokref ilock;

	if (dev == NULL)
		return(ENXIO);
	if (dev_is_good(dev) == 0)
		return(ENXIO);
	KKASSERT(vp->v_rdev == NULL);
	vp->v_rdev = reference_dev(dev);
	lwkt_gettoken(&ilock, &spechash_token);
	SLIST_INSERT_HEAD(&dev->si_hlist, vp, v_cdevnext);
	lwkt_reltoken(&ilock);
	return(0);
}
void
v_release_rdev(struct vnode *vp)
{
	lwkt_tokref ilock;
	cdev_t dev;

	if ((dev = vp->v_rdev) != NULL) {
		lwkt_gettoken(&ilock, &spechash_token);
		SLIST_REMOVE(&dev->si_hlist, vp, vnode, v_cdevnext);
		vp->v_rdev = NULL;
		release_dev(dev);
		lwkt_reltoken(&ilock);
	}
}
/*
 * Add a vnode to the alias list hung off the cdev_t.  We only associate
 * the device number with the vnode.  The actual device is not associated
 * until the vnode is opened (usually in spec_open()), and will be
 * disassociated on last close.
 */
void
addaliasu(struct vnode *nvp, int x, int y)
{
	if (nvp->v_type != VBLK && nvp->v_type != VCHR)
		panic("addaliasu on non-special vnode");
	nvp->v_umajor = x;
	nvp->v_uminor = y;
}
/*
 * Simple call that a filesystem can make to try to get rid of a
 * vnode.  It will fail if anyone is referencing the vnode (including
 * the caller).
 *
 * The filesystem can check whether its in-memory inode structure still
 * references the vp on return.
 */
void
vclean_unlocked(struct vnode *vp)
{
	vx_get(vp);
	if (sysref_isactive(&vp->v_sysref) == 0)
		vgone_vxlocked(vp);
	vx_put(vp);
}
/*
 * Disassociate a vnode from its underlying filesystem.
 *
 * The vnode must be VX locked and referenced.  In all normal situations
 * there are no active references.  If vclean_vxlocked() is called while
 * there are active references, the vnode is being ripped out and we have
 * to call VOP_CLOSE() as appropriate before we can reclaim it.
 */
void
vclean_vxlocked(struct vnode *vp, int flags)
{
	int active;
	int n;
	vm_object_t object;

	/*
	 * If the vnode has already been reclaimed we have nothing to do.
	 */
	if (vp->v_flag & VRECLAIMED)
		return;
	vsetflags(vp, VRECLAIMED);

	/*
	 * Scrap the vfs cache
	 */
	while (cache_inval_vp(vp, 0) != 0) {
		kprintf("Warning: vnode %p clean/cache_resolution race detected\n", vp);
		tsleep(vp, 0, "vclninv", 2);
	}

	/*
	 * Check to see if the vnode is in use. If so we have to reference it
	 * before we clean it out so that its count cannot fall to zero and
	 * generate a race against ourselves to recycle it.
	 */
	active = sysref_isactive(&vp->v_sysref);

	/*
	 * Clean out any buffers associated with the vnode and destroy its
	 * object, if it has one.
	 */
	vinvalbuf(vp, V_SAVE, 0, 0);

	/*
	 * If purging an active vnode (typically during a forced unmount
	 * or reboot), it must be closed and deactivated before being
	 * reclaimed.  This isn't really all that safe, but what can
	 * we do? XXX.
	 *
	 * Note that neither of these routines unlocks the vnode.
	 */
	if (active && (flags & DOCLOSE)) {
		while ((n = vp->v_opencount) != 0) {
			if (vp->v_writecount)
				VOP_CLOSE(vp, FWRITE|FNONBLOCK);
			else
				VOP_CLOSE(vp, FNONBLOCK);
			if (vp->v_opencount == n) {
				kprintf("Warning: unable to force-close"
					" vnode %p\n", vp);
				break;
			}
		}
	}

	/*
	 * If the vnode has not been deactivated, deactivate it.  Deactivation
	 * can create new buffers and VM pages so we have to call vinvalbuf()
	 * again to make sure they all get flushed.
	 *
	 * This can occur if a file with a link count of 0 needs to be
	 * truncated.
	 *
	 * If the vnode is already dead don't try to deactivate it.
	 */
	if ((vp->v_flag & VINACTIVE) == 0) {
		vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
		vinvalbuf(vp, V_SAVE, 0, 0);
	}

	/*
	 * If the vnode has an object, destroy it.
	 */
	if ((object = vp->v_object) != NULL) {
		if (object->ref_count == 0) {
			if ((object->flags & OBJ_DEAD) == 0)
				vm_object_terminate(object);
		} else {
			vm_pager_deallocate(object);
		}
		vclrflags(vp, VOBJBUF);
	}
	KKASSERT((vp->v_flag & VOBJBUF) == 0);

	/*
	 * Reclaim the vnode if not already dead.
	 */
	if (vp->v_mount && VOP_RECLAIM(vp))
		panic("vclean: cannot reclaim");

	/*
	 * Done with purge, notify sleepers of the grim news.
	 */
	vp->v_ops = &dead_vnode_vops_p;
	vn_pollgone(vp);
	vp->v_tag = VT_NON;

	/*
	 * If we are destroying an active vnode, reactivate it now that
	 * we have reassociated it with deadfs.  This prevents the system
	 * from crashing on the vnode due to it being unexpectedly marked
	 * as inactive or reclaimed.
	 */
	if (active && (flags & DOCLOSE)) {
		vclrflags(vp, VINACTIVE | VRECLAIMED);
	}
}
/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 *
 * The vnode must be referenced but should not be locked.
 */
int
vrevoke(struct vnode *vp, struct ucred *cred)
{
	struct vnode *vq;
	struct vnode *vqn;
	lwkt_tokref ilock;
	cdev_t dev;
	int error;

	/*
	 * If the vnode has a device association, scrap all vnodes associated
	 * with the device.  Don't let the device disappear on us while we
	 * are scrapping the vnodes.
	 *
	 * The passed vp will probably show up in the list, do not VX lock
	 * it twice!
	 *
	 * Releasing the vnode's rdev here can mess up specfs's call to
	 * device close, so don't do it.  The vnode has been disassociated
	 * and the device will be closed after the last ref on the related
	 * fp goes away (if not still open by e.g. the kernel).
	 */
	if (vp->v_type != VCHR) {
		error = fdrevoke(vp, DTYPE_VNODE, cred);
		return (error);
	}
	if ((dev = vp->v_rdev) == NULL) {
		return (0);
	}
	reference_dev(dev);
	lwkt_gettoken(&ilock, &spechash_token);

	vqn = SLIST_FIRST(&dev->si_hlist);
	if (vqn)
		vref(vqn);
	while ((vq = vqn) != NULL) {
		vqn = SLIST_NEXT(vqn, v_cdevnext);
		if (vqn)
			vref(vqn);
		fdrevoke(vq, DTYPE_VNODE, cred);
		/*v_release_rdev(vq);*/
		vrele(vq);
	}
	lwkt_reltoken(&ilock);
	release_dev(dev);
	return (0);
}
/*
 * This is called when the object underlying a vnode is being destroyed,
 * such as in a remove().  Try to recycle the vnode immediately if the
 * only active reference is our reference.
 *
 * Directory vnodes in the namecache with children cannot be immediately
 * recycled because numerous VOP_N*() ops require them to be stable.
 *
 * To avoid recursive recycling from VOP_INACTIVE implementations this
 * function is a NOP if VRECLAIMED is already set.
 */
int
vrecycle(struct vnode *vp)
{
	if (vp->v_sysref.refcnt <= 1 && (vp->v_flag & VRECLAIMED) == 0) {
		if (cache_inval_vp_nonblock(vp))
			return(0);
		vgone_vxlocked(vp);
		return (1);
	}
	return (0);
}
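/*
 * Usage sketch (illustrative, not part of the original source): a
 * VOP_INACTIVE implementation may try to recycle a vnode whose file has
 * been removed so the inode is freed immediately instead of lingering.
 * "ip" and its field are hypothetical names.
 *
 *	if (ip->i_nlink == 0)
 *		vrecycle(vp);
 */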
/*
 * Return the maximum I/O size allowed for strategy calls on VP.
 *
 * If vp is VCHR or VBLK we dive the device, otherwise we use
 * the vp's mount info.
 */
int
vmaxiosize(struct vnode *vp)
{
	if (vp->v_type == VBLK || vp->v_type == VCHR) {
		return(vp->v_rdev->si_iosize_max);
	} else {
		return(vp->v_mount->mnt_iosize_max);
	}
}
/*
 * Eliminate all activity associated with a vnode in preparation for reuse.
 *
 * The vnode must be VX locked and refd and will remain VX locked and refd
 * on return.  This routine may be called with the vnode in any state, as
 * long as it is VX locked.  The vnode will be cleaned out and marked
 * VRECLAIMED but will not actually be reused until all existing refs and
 * holds go away.
 *
 * NOTE: This routine may be called on a vnode which has not yet been
 * already been deactivated (VOP_INACTIVE), or on a vnode which has
 * already been reclaimed.
 *
 * This routine is not responsible for placing us back on the freelist.
 * Instead, it happens automatically when the caller releases the VX lock
 * (assuming there aren't any other references).
 */
void
vgone_vxlocked(struct vnode *vp)
{
	/*
	 * assert that the VX lock is held.  This is an absolute requirement
	 * now for vgone_vxlocked() to be called.
	 */
	KKASSERT(vp->v_lock.lk_exclusivecount == 1);

	/*
	 * Clean out the filesystem specific data and set the VRECLAIMED
	 * bit.  Also deactivate the vnode if necessary.
	 */
	vclean_vxlocked(vp, DOCLOSE);

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if (vp->v_mount != NULL) {
		KKASSERT(vp->v_data == NULL);
		insmntque(vp, NULL);
	}

	/*
	 * If special device, remove it from special device alias list
	 * if it is on one.  This should normally only occur if a vnode is
	 * being revoked as the device should otherwise have been released
	 * naturally.
	 */
	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_rdev != NULL) {
		v_release_rdev(vp);
	}

	/*
	 * Set us to VBAD
	 */
	vp->v_type = VBAD;
}
/*
 * Lookup a vnode by device number.
 *
 * Returns non-zero and *vpp set to a vref'd vnode on success.
 * Returns zero on failure.
 */
int
vfinddev(cdev_t dev, enum vtype type, struct vnode **vpp)
{
	lwkt_tokref ilock;
	struct vnode *vp;

	lwkt_gettoken(&ilock, &spechash_token);
	SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
		if (type == vp->v_type) {
			*vpp = vp;
			vref(vp);
			lwkt_reltoken(&ilock);
			return (1);
		}
	}
	lwkt_reltoken(&ilock);
	return (0);
}
/*
 * Calculate the total number of references to a special device.  This
 * routine may only be called for VBLK and VCHR vnodes since v_rdev is
 * an overloaded field.  Since udev2dev can now return NULL, we have
 * to check for a NULL v_rdev.
 */
int
count_dev(cdev_t dev)
{
	lwkt_tokref ilock;
	struct vnode *vp;
	int count = 0;

	if (SLIST_FIRST(&dev->si_hlist)) {
		lwkt_gettoken(&ilock, &spechash_token);
		SLIST_FOREACH(vp, &dev->si_hlist, v_cdevnext) {
			count += vp->v_opencount;
		}
		lwkt_reltoken(&ilock);
	}
	return(count);
}
int
vcount(struct vnode *vp)
{
	if (vp->v_rdev == NULL)
		return(0);
	return(count_dev(vp->v_rdev));
}
/*
 * Initialize VMIO for a vnode.  This routine MUST be called before a
 * VFS can issue buffer cache ops on a vnode.  It is typically called
 * when a vnode is initialized from its inode.
 */
int
vinitvmio(struct vnode *vp, off_t filesize)
{
	vm_object_t object;
	int error = 0;

retry:
	if ((object = vp->v_object) == NULL) {
		object = vnode_pager_alloc(vp, filesize, 0, 0);
		/*
		 * Dereference the reference we just created.  This assumes
		 * that the object is associated with the vp.
		 */
		object->ref_count--;
		vrele(vp);
	} else {
		if (object->flags & OBJ_DEAD) {
			vn_unlock(vp);
			vm_object_dead_sleep(object, "vodead");
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			goto retry;
		}
	}
	KASSERT(vp->v_object != NULL, ("vinitvmio: NULL object"));
	vsetflags(vp, VOBJBUF);
	return (error);
}
/*
 * Print out a description of a vnode.
 */
static char *typename[] =
{"VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD"};

void
vprint(char *label, struct vnode *vp)
{
	char buf[96];

	if (label != NULL)
		kprintf("%s: %p: ", label, (void *)vp);
	else
		kprintf("%p: ", (void *)vp);
	kprintf("type %s, sysrefs %d, writecount %d, holdcnt %d,",
		typename[vp->v_type],
		vp->v_sysref.refcnt, vp->v_writecount, vp->v_auxrefs);
	buf[0] = '\0';
	if (vp->v_flag & VROOT)
		strcat(buf, "|VROOT");
	if (vp->v_flag & VPFSROOT)
		strcat(buf, "|VPFSROOT");
	if (vp->v_flag & VTEXT)
		strcat(buf, "|VTEXT");
	if (vp->v_flag & VSYSTEM)
		strcat(buf, "|VSYSTEM");
	if (vp->v_flag & VFREE)
		strcat(buf, "|VFREE");
	if (vp->v_flag & VOBJBUF)
		strcat(buf, "|VOBJBUF");
	if (buf[0] != '\0')
		kprintf(" flags (%s)", &buf[1]);
	if (vp->v_data == NULL) {
		kprintf("\n");
	} else {
		kprintf("\n\t");
		VOP_PRINT(vp);
	}
}
/*
 * Do the usual access checking.
 * file_mode, uid and gid are from the vnode in question,
 * while acc_mode and cred are from the VOP_ACCESS parameter list
 */
int
vaccess(enum vtype type, mode_t file_mode, uid_t uid, gid_t gid,
	mode_t acc_mode, struct ucred *cred)
{
	mode_t mask;
	int ismember;

	/*
	 * Super-user always gets read/write access, but execute access depends
	 * on at least one execute bit being set.
	 */
	if (priv_check_cred(cred, PRIV_ROOT, 0) == 0) {
		if ((acc_mode & VEXEC) && type != VDIR &&
		    (file_mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)
			return (EACCES);
		return (0);
	}

	mask = 0;

	/* Otherwise, check the owner. */
	if (cred->cr_uid == uid) {
		if (acc_mode & VEXEC)
			mask |= S_IXUSR;
		if (acc_mode & VREAD)
			mask |= S_IRUSR;
		if (acc_mode & VWRITE)
			mask |= S_IWUSR;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check the groups. */
	ismember = groupmember(gid, cred);
	if (cred->cr_svgid == gid || ismember) {
		if (acc_mode & VEXEC)
			mask |= S_IXGRP;
		if (acc_mode & VREAD)
			mask |= S_IRGRP;
		if (acc_mode & VWRITE)
			mask |= S_IWGRP;
		return ((file_mode & mask) == mask ? 0 : EACCES);
	}

	/* Otherwise, check everyone else. */
	if (acc_mode & VEXEC)
		mask |= S_IXOTH;
	if (acc_mode & VREAD)
		mask |= S_IROTH;
	if (acc_mode & VWRITE)
		mask |= S_IWOTH;
	return ((file_mode & mask) == mask ? 0 : EACCES);
}
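/*
 * Usage sketch (illustrative, not part of the original source): a
 * filesystem's VOP_ACCESS can normally be implemented directly on top of
 * vaccess() using the mode and ownership stored in its inode.
 * "myfs_access" and the inode fields are hypothetical names.
 *
 *	static int
 *	myfs_access(struct vop_access_args *ap)
 *	{
 *		struct vnode *vp = ap->a_vp;
 *		struct myfs_inode *ip = vp->v_data;
 *
 *		return (vaccess(vp->v_type, ip->i_mode, ip->i_uid,
 *				ip->i_gid, ap->a_mode, ap->a_cred));
 *	}
 */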
#ifdef DDB
#include <ddb/ddb.h>

static int db_show_locked_vnodes(struct mount *mp, void *data);

/*
 * List all of the locked vnodes in the system.
 * Called when debugging the kernel.
 */
DB_SHOW_COMMAND(lockedvnodes, lockedvnodes)
{
	kprintf("Locked vnodes\n");
	mountlist_scan(db_show_locked_vnodes, NULL,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
}

static int
db_show_locked_vnodes(struct mount *mp, void *data __unused)
{
	struct vnode *vp;

	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
		if (vn_islocked(vp))
			vprint(NULL, vp);
	}
	return(0);
}
#endif
/*
 * Top level filesystem related information gathering.
 */
static int	sysctl_ovfs_conf (SYSCTL_HANDLER_ARGS);

static int
vfs_sysctl(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1 - 1;	/* XXX */
	u_int namelen = arg2 + 1;	/* XXX */
	struct vfsconf *vfsp;
	int maxtypenum;

#if 1 || defined(COMPAT_PRELITE2)
	/* Resolve ambiguity between VFS_VFSCONF and VFS_GENERIC. */
	if (namelen == 1)
		return (sysctl_ovfs_conf(oidp, arg1, arg2, req));
#endif

#ifdef notyet
	/* all sysctl names at this level are at least name and field */
	if (namelen < 2)
		return (ENOTDIR);		/* overloaded */
	if (name[0] != VFS_GENERIC) {
		vfsp = vfsconf_find_by_typenum(name[0]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return ((*vfsp->vfc_vfsops->vfs_sysctl)(&name[1], namelen - 1,
		    oldp, oldlenp, newp, newlen, p));
	}
#endif

	switch (name[1]) {
	case VFS_MAXTYPENUM:
		if (namelen != 2)
			return (ENOTDIR);
		maxtypenum = vfsconf_get_maxtypenum();
		return (SYSCTL_OUT(req, &maxtypenum, sizeof(maxtypenum)));
	case VFS_CONF:
		if (namelen != 3)
			return (ENOTDIR);	/* overloaded */
		vfsp = vfsconf_find_by_typenum(name[2]);
		if (vfsp == NULL)
			return (EOPNOTSUPP);
		return (SYSCTL_OUT(req, vfsp, sizeof *vfsp));
	}
	return (EOPNOTSUPP);
}

SYSCTL_NODE(_vfs, VFS_GENERIC, generic, CTLFLAG_RD, vfs_sysctl,
	"Generic filesystem");
#if 1 || defined(COMPAT_PRELITE2)

static int
sysctl_ovfs_conf_iter(struct vfsconf *vfsp, void *data)
{
	int error;
	struct ovfsconf ovfs;
	struct sysctl_req *req = (struct sysctl_req*) data;

	bzero(&ovfs, sizeof(ovfs));
	ovfs.vfc_vfsops = vfsp->vfc_vfsops;	/* XXX used as flag */
	strcpy(ovfs.vfc_name, vfsp->vfc_name);
	ovfs.vfc_index = vfsp->vfc_typenum;
	ovfs.vfc_refcount = vfsp->vfc_refcount;
	ovfs.vfc_flags = vfsp->vfc_flags;
	error = SYSCTL_OUT(req, &ovfs, sizeof ovfs);
	if (error)
		return error;	/* abort iteration with error code */
	else
		return 0;	/* continue iterating with next element */
}

static int
sysctl_ovfs_conf(SYSCTL_HANDLER_ARGS)
{
	return vfsconf_each(sysctl_ovfs_conf_iter, (void*)req);
}

#endif /* 1 || COMPAT_PRELITE2 */
/*
 * Check to see if a filesystem is mounted on a block device.
 */
int
vfs_mountedon(struct vnode *vp)
{
	cdev_t dev;

	if ((dev = vp->v_rdev) == NULL) {
/*		if (vp->v_type != VBLK)
			dev = get_dev(vp->v_uminor, vp->v_umajor); */
	}
	if (dev != NULL && dev->si_mountpoint)
		return (EBUSY);
	return (0);
}
/*
 * Unmount all filesystems. The list is traversed in reverse order
 * of mounting to avoid dependencies.
 */
static int vfs_umountall_callback(struct mount *mp, void *data);

void
vfs_unmountall(void)
{
	int count;

	do {
		count = mountlist_scan(vfs_umountall_callback,
				       NULL, MNTSCAN_REVERSE|MNTSCAN_NOBUSY);
	} while (count);
}

static int
vfs_umountall_callback(struct mount *mp, void *data)
{
	int error;

	error = dounmount(mp, MNT_FORCE);
	if (error) {
		mountlist_remove(mp);
		kprintf("unmount of filesystem mounted from %s failed (",
			mp->mnt_stat.f_mntfromname);
		if (error == EBUSY)
			kprintf("BUSY)\n");
		else
			kprintf("%d)\n", error);
	}
	return(1);
}
/*
 * Checks the mount flags for parameter mp and put the names comma-separated
 * into a string buffer buf with a size limit specified by len.
 *
 * It returns the number of bytes written into buf, and (*errorp) will be
 * set to 0, EINVAL (if passed length is 0), or ENOSPC (supplied buffer was
 * not large enough).  The buffer will be 0-terminated if len was not 0.
 */
size_t
vfs_flagstostr(int flags, const struct mountctl_opt *optp,
	       char *buf, size_t len, int *errorp)
{
	static const struct mountctl_opt optnames[] = {
		{ MNT_ASYNC,		"asynchronous" },
		{ MNT_EXPORTED,		"NFS exported" },
		{ MNT_LOCAL,		"local" },
		{ MNT_NOATIME,		"noatime" },
		{ MNT_NODEV,		"nodev" },
		{ MNT_NOEXEC,		"noexec" },
		{ MNT_NOSUID,		"nosuid" },
		{ MNT_NOSYMFOLLOW,	"nosymfollow" },
		{ MNT_QUOTA,		"with-quotas" },
		{ MNT_RDONLY,		"read-only" },
		{ MNT_SYNCHRONOUS,	"synchronous" },
		{ MNT_UNION,		"union" },
		{ MNT_NOCLUSTERR,	"noclusterr" },
		{ MNT_NOCLUSTERW,	"noclusterw" },
		{ MNT_SUIDDIR,		"suiddir" },
		{ MNT_SOFTDEP,		"soft-updates" },
		{ MNT_IGNORE,		"ignore" },
		{ 0,			NULL }
	};
	int bwritten;
	int bleft;
	int optlen;
	int actsize;

	*errorp = 0;
	bwritten = 0;
	bleft = len - 1;	/* leave room for trailing \0 */

	/*
	 * Checks the size of the string. If it contains
	 * any data, then we will append the new flags to
	 * it.
	 */
	actsize = strlen(buf);
	if (actsize > 0)
		buf += actsize;

	/* Default flags if no flags passed */
	if (optp == NULL)
		optp = optnames;

	if (bleft < 0) {	/* degenerate case, 0-length buffer */
		*errorp = EINVAL;
		return(0);
	}

	for (; flags && optp->o_opt; ++optp) {
		if ((flags & optp->o_opt) == 0)
			continue;
		optlen = strlen(optp->o_name);
		if (bwritten || actsize > 0) {
			if (bleft < 2) {
				*errorp = ENOSPC;
				break;
			}
			buf[bwritten++] = ',';
			buf[bwritten++] = ' ';
			bleft -= 2;
		}
		if (bleft < optlen) {
			*errorp = ENOSPC;
			break;
		}
		bcopy(optp->o_name, buf + bwritten, optlen);
		bwritten += optlen;
		bleft -= optlen;
		flags &= ~optp->o_opt;
	}

	/*
	 * Space already reserved for trailing \0
	 */
	buf[bwritten] = 0;
	return (bwritten);
}
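/*
 * Usage sketch (illustrative, not part of the original source):
 * formatting a mount's flags for a diagnostic message.  Passing NULL for
 * optp selects the default optnames table above; the buffer must start
 * 0-terminated since any existing contents are appended to.
 *
 *	char buf[256];
 *	int error;
 *
 *	buf[0] = '\0';
 *	vfs_flagstostr(mp->mnt_flag, NULL, buf, sizeof(buf), &error);
 *	if (error == 0)
 *		kprintf("mount flags: %s\n", buf);
 */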
/*
 * Build hash lists of net addresses and hang them off the mount point.
 * Called by ufs_mount() to set up the lists of export addresses.
 */
static int
vfs_hang_addrlist(struct mount *mp, struct netexport *nep,
		  const struct export_args *argp)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	int i;
	struct radix_node *rn;
	struct sockaddr *saddr, *smask = 0;
	struct domain *dom;
	int error;

	if (argp->ex_addrlen == 0) {
		if (mp->mnt_flag & MNT_DEFEXPORTED)
			return (EPERM);
		np = &nep->ne_defexported;
		np->netc_exflags = argp->ex_flags;
		np->netc_anon = argp->ex_anon;
		np->netc_anon.cr_ref = 1;
		mp->mnt_flag |= MNT_DEFEXPORTED;
		return (0);
	}

	if (argp->ex_addrlen < 0 || argp->ex_addrlen > MLEN)
		return (EINVAL);
	if (argp->ex_masklen < 0 || argp->ex_masklen > MLEN)
		return (EINVAL);

	i = sizeof(struct netcred) + argp->ex_addrlen + argp->ex_masklen;
	np = (struct netcred *) kmalloc(i, M_NETADDR, M_WAITOK | M_ZERO);
	saddr = (struct sockaddr *) (np + 1);
	if ((error = copyin(argp->ex_addr, (caddr_t) saddr, argp->ex_addrlen)))
		goto out;
	if (saddr->sa_len > argp->ex_addrlen)
		saddr->sa_len = argp->ex_addrlen;
	if (argp->ex_masklen) {
		smask = (struct sockaddr *)((caddr_t)saddr + argp->ex_addrlen);
		error = copyin(argp->ex_mask, (caddr_t)smask, argp->ex_masklen);
		if (error)
			goto out;
		if (smask->sa_len > argp->ex_masklen)
			smask->sa_len = argp->ex_masklen;
	}
	i = saddr->sa_family;
	if ((rnh = nep->ne_rtable[i]) == 0) {
		/*
		 * Seems silly to initialize every AF when most are not used,
		 * do so on demand here
		 */
		SLIST_FOREACH(dom, &domains, dom_next)
			if (dom->dom_family == i && dom->dom_rtattach) {
				dom->dom_rtattach((void **) &nep->ne_rtable[i],
				    dom->dom_rtoffset);
				break;
			}
		if ((rnh = nep->ne_rtable[i]) == 0) {
			error = ENOBUFS;
			goto out;
		}
	}
	rn = (*rnh->rnh_addaddr) ((char *) saddr, (char *) smask, rnh,
	    np->netc_rnodes);
	if (rn == 0 || np != (struct netcred *) rn) {	/* already exists */
		error = EPERM;
		goto out;
	}
	np->netc_exflags = argp->ex_flags;
	np->netc_anon = argp->ex_anon;
	np->netc_anon.cr_ref = 1;
	return (0);
out:
	kfree(np, M_NETADDR);
	return (error);
}
/* ARGSUSED */
static int
vfs_free_netcred(struct radix_node *rn, void *w)
{
	struct radix_node_head *rnh = (struct radix_node_head *) w;

	(*rnh->rnh_deladdr) (rn->rn_key, rn->rn_mask, rnh);
	kfree((caddr_t) rn, M_NETADDR);
	return (0);
}
/*
 * Free the net address hash lists that are hanging off the mount points.
 */
static void
vfs_free_addrlist(struct netexport *nep)
{
	int i;
	struct radix_node_head *rnh;

	for (i = 0; i <= AF_MAX; i++)
		if ((rnh = nep->ne_rtable[i])) {
			(*rnh->rnh_walktree) (rnh, vfs_free_netcred,
			    (caddr_t) rnh);
			kfree((caddr_t) rnh, M_RTABLE);
			nep->ne_rtable[i] = 0;
		}
}
int
vfs_export(struct mount *mp, struct netexport *nep,
	   const struct export_args *argp)
{
	int error;

	if (argp->ex_flags & MNT_DELEXPORT) {
		if (mp->mnt_flag & MNT_EXPUBLIC) {
			vfs_setpublicfs(NULL, NULL, NULL);
			mp->mnt_flag &= ~MNT_EXPUBLIC;
		}
		vfs_free_addrlist(nep);
		mp->mnt_flag &= ~(MNT_EXPORTED | MNT_DEFEXPORTED);
	}
	if (argp->ex_flags & MNT_EXPORTED) {
		if (argp->ex_flags & MNT_EXPUBLIC) {
			if ((error = vfs_setpublicfs(mp, nep, argp)) != 0)
				return (error);
			mp->mnt_flag |= MNT_EXPUBLIC;
		}
		if ((error = vfs_hang_addrlist(mp, nep, argp)))
			return (error);
		mp->mnt_flag |= MNT_EXPORTED;
	}
	return (0);
}
/*
 * Set the publicly exported filesystem (WebNFS). Currently, only
 * one public filesystem is possible in the spec (RFC 2054 and 2055)
 */
int
vfs_setpublicfs(struct mount *mp, struct netexport *nep,
		const struct export_args *argp)
{
	int error;
	struct vnode *rvp;
	char *cp;

	/*
	 * mp == NULL -> invalidate the current info, the FS is
	 * no longer exported. May be called from either vfs_export
	 * or unmount, so check if it hasn't already been done.
	 */
	if (mp == NULL) {
		if (nfs_pub.np_valid) {
			nfs_pub.np_valid = 0;
			if (nfs_pub.np_index != NULL) {
				FREE(nfs_pub.np_index, M_TEMP);
				nfs_pub.np_index = NULL;
			}
		}
		return (0);
	}

	/*
	 * Only one allowed at a time.
	 */
	if (nfs_pub.np_valid != 0 && mp != nfs_pub.np_mount)
		return (EBUSY);

	/*
	 * Get real filehandle for root of exported FS.
	 */
	bzero((caddr_t)&nfs_pub.np_handle, sizeof(nfs_pub.np_handle));
	nfs_pub.np_handle.fh_fsid = mp->mnt_stat.f_fsid;

	if ((error = VFS_ROOT(mp, &rvp)))
		return (error);

	if ((error = VFS_VPTOFH(rvp, &nfs_pub.np_handle.fh_fid)))
		return (error);

	vput(rvp);

	/*
	 * If an indexfile was specified, pull it in.
	 */
	if (argp->ex_indexfile != NULL) {
		int namelen;

		error = vn_get_namelen(rvp, &namelen);
		if (error)
			return (error);
		MALLOC(nfs_pub.np_index, char *, namelen, M_TEMP,
		    M_WAITOK);
		error = copyinstr(argp->ex_indexfile, nfs_pub.np_index,
		    namelen, NULL);
		if (!error) {
			/*
			 * Check for illegal filenames.
			 */
			for (cp = nfs_pub.np_index; *cp; cp++) {
				if (*cp == '/') {
					error = EINVAL;
					break;
				}
			}
		}
		if (error) {
			FREE(nfs_pub.np_index, M_TEMP);
			return (error);
		}
	}

	nfs_pub.np_mount = mp;
	nfs_pub.np_valid = 1;
	return (0);
}
struct netcred *
vfs_export_lookup(struct mount *mp, struct netexport *nep,
		  struct sockaddr *nam)
{
	struct netcred *np;
	struct radix_node_head *rnh;
	struct sockaddr *saddr;

	np = NULL;
	if (mp->mnt_flag & MNT_EXPORTED) {
		/*
		 * Lookup in the export list first.
		 */
		if (nam != NULL) {
			saddr = nam;
			rnh = nep->ne_rtable[saddr->sa_family];
			if (rnh != NULL) {
				np = (struct netcred *)
					(*rnh->rnh_matchaddr)((char *)saddr,
							      rnh);
				if (np && np->netc_rnodes->rn_flags & RNF_ROOT)
					np = NULL;
			}
		}
		/*
		 * If no address match, use the default if it exists.
		 */
		if (np == NULL && mp->mnt_flag & MNT_DEFEXPORTED)
			np = &nep->ne_defexported;
	}
	return (np);
}
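/*
 * Usage sketch (illustrative, not part of the original source): an
 * NFS-style server validates a client address against a mount's export
 * list before honoring a request; a NULL return means the client is not
 * exported to.
 *
 *	struct netcred *np;
 *
 *	np = vfs_export_lookup(mp, nep, nam);
 *	if (np == NULL)
 *		return (EACCES);
 */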
/*
 * perform msync on all vnodes under a mount point.  The mount point must
 * be locked.  This code is also responsible for lazy-freeing unreferenced
 * vnodes whose VM objects no longer contain pages.
 *
 * NOTE: MNT_WAIT still skips vnodes in the VXLOCK state.
 *
 * NOTE: XXX VOP_PUTPAGES and friends requires that the vnode be locked,
 * but vnode_pager_putpages() doesn't lock the vnode.  We have to do it
 * way up in this high level function.
 */
static int vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data);
static int vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data);

void
vfs_msync(struct mount *mp, int flags)
{
	int vmsc_flags;

	vmsc_flags = VMSC_GETVP;
	if (flags != MNT_WAIT)
		vmsc_flags |= VMSC_NOWAIT;
	vmntvnodescan(mp, vmsc_flags, vfs_msync_scan1, vfs_msync_scan2,
		      (void *)(intptr_t)flags);
}
/*
 * scan1 is a fast pre-check.  There could be hundreds of thousands of
 * vnodes, we cannot afford to do anything heavy weight until we have a
 * fairly good indication that there is work to do.
 */
static int
vfs_msync_scan1(struct mount *mp, struct vnode *vp, void *data)
{
	int flags = (int)(intptr_t)data;

	if ((vp->v_flag & VRECLAIMED) == 0) {
		if (vshouldmsync(vp))
			return(0);	/* call scan2 */
		if ((mp->mnt_flag & MNT_RDONLY) == 0 &&
		    (vp->v_flag & VOBJDIRTY) &&
		    (flags == MNT_WAIT || vn_islocked(vp) == 0)) {
			return(0);	/* call scan2 */
		}
	}

	/*
	 * do not call scan2, continue the loop
	 */
	return(-1);
}
/*
 * This callback is handed a locked vnode.
 */
static int
vfs_msync_scan2(struct mount *mp, struct vnode *vp, void *data)
{
	vm_object_t obj;
	int flags = (int)(intptr_t)data;

	if (vp->v_flag & VRECLAIMED)
		return(0);

	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (vp->v_flag & VOBJDIRTY)) {
		if ((obj = vp->v_object) != NULL) {
			vm_object_page_clean(obj, 0, 0,
				flags == MNT_WAIT ? OBJPC_SYNC : OBJPC_NOSYNC);
		}
	}
	return(0);
}
/*
 * Record a process's interest in events which might happen to
 * a vnode.  Because poll uses the historic select-style interface
 * internally, this routine serves as both the ``check for any
 * pending events'' and the ``record my interest in future events''
 * functions.  (These are done together, while the lock is held,
 * to avoid race conditions.)
 */
int
vn_pollrecord(struct vnode *vp, int events)
{
	lwkt_tokref vlock;

	KKASSERT(curthread->td_proc != NULL);

	lwkt_gettoken(&vlock, &vp->v_token);
	if (vp->v_pollinfo.vpi_revents & events) {
		/*
		 * This leaves events we are not interested
		 * in available for the other process which
		 * presumably had requested them
		 * (otherwise they would never have been
		 * recorded).
		 */
		events &= vp->v_pollinfo.vpi_revents;
		vp->v_pollinfo.vpi_revents &= ~events;

		lwkt_reltoken(&vlock);
		return events;
	}
	vp->v_pollinfo.vpi_events |= events;
	selrecord(curthread, &vp->v_pollinfo.vpi_selinfo);
	lwkt_reltoken(&vlock);
	return 0;
}
/*
 * Note the occurrence of an event.  If the VN_POLLEVENT macro is used,
 * it is possible for us to miss an event due to race conditions, but
 * that condition is expected to be rare, so for the moment it is the
 * preferred interface.
 */
void
vn_pollevent(struct vnode *vp, int events)
{
	lwkt_tokref vlock;

	lwkt_gettoken(&vlock, &vp->v_token);
	if (vp->v_pollinfo.vpi_events & events) {
		/*
		 * We clear vpi_events so that we don't
		 * call selwakeup() twice if two events are
		 * posted before the polling process(es) is
		 * awakened.  This also ensures that we take at
		 * most one selwakeup() if the polling process
		 * is no longer interested.  However, it does
		 * mean that only one event can be noticed at
		 * a time.  (Perhaps we should only clear those
		 * event bits which we note?) XXX
		 */
		vp->v_pollinfo.vpi_events = 0;	/* &= ~events ??? */
		vp->v_pollinfo.vpi_revents |= events;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
	lwkt_reltoken(&vlock);
}
/*
 * Wake up anyone polling on vp because it is being revoked.
 * This depends on dead_poll() returning POLLHUP for correct
 * behavior.
 */
void
vn_pollgone(struct vnode *vp)
{
	lwkt_tokref vlock;

	lwkt_gettoken(&vlock, &vp->v_token);
	if (vp->v_pollinfo.vpi_events) {
		vp->v_pollinfo.vpi_events = 0;
		selwakeup(&vp->v_pollinfo.vpi_selinfo);
	}
	lwkt_reltoken(&vlock);
}
/*
 * extract the cdev_t from a VBLK or VCHR.  The vnode must have been opened
 * (or v_rdev might be NULL).
 */
cdev_t
vn_todev(struct vnode *vp)
{
	if (vp->v_type != VBLK && vp->v_type != VCHR)
		return (NULL);
	KKASSERT(vp->v_rdev != NULL);
	return (vp->v_rdev);
}
/*
 * Check if vnode represents a disk device.  The vnode does not need to be
 * opened.
 */
int
vn_isdisk(struct vnode *vp, int *errp)
{
	cdev_t dev;

	if (vp->v_type != VCHR) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	dev = vp->v_rdev;
	if (dev == NULL) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if (dev_is_good(dev) == 0) {
		if (errp != NULL)
			*errp = ENXIO;
		return (0);
	}
	if ((dev_dflags(dev) & D_DISK) == 0) {
		if (errp != NULL)
			*errp = ENOTBLK;
		return (0);
	}
	if (errp != NULL)
		*errp = 0;
	return (1);
}
int
vn_get_namelen(struct vnode *vp, int *namelen)
{
	int error;
	register_t retval[2];

	error = VOP_PATHCONF(vp, _PC_NAME_MAX, retval);
	if (error)
		return (error);
	*namelen = (int)retval[0];
	return (0);
}
int
vop_write_dirent(int *error, struct uio *uio, ino_t d_ino, uint8_t d_type,
		 uint16_t d_namlen, const char *d_name)
{
	struct dirent *dp;
	size_t len;

	len = _DIRENT_RECLEN(d_namlen);
	if (len > uio->uio_resid)
		return(1);

	dp = kmalloc(len, M_TEMP, M_WAITOK | M_ZERO);

	dp->d_ino = d_ino;
	dp->d_namlen = d_namlen;
	dp->d_type = d_type;
	bcopy(d_name, dp->d_name, d_namlen);

	*error = uiomove((caddr_t)dp, len, uio);

	kfree(dp, M_TEMP);

	return(0);
}
void
vn_mark_atime(struct vnode *vp, struct thread *td)
{
	struct proc *p = td->td_proc;
	struct ucred *cred = p ? p->p_ucred : proc0.p_ucred;

	if ((vp->v_mount->mnt_flag & (MNT_NOATIME | MNT_RDONLY)) == 0) {
		VOP_MARKATIME(vp, cred);
	}
}
);