/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.37 2008/09/17 21:44:18 dillon Exp $
 */
/*
 * External virtual filesystem routines
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm_object.h>
struct mountscan_info {
        TAILQ_ENTRY(mountscan_info) msi_entry;
        int msi_how;
        struct mount *msi_node;
};

struct vmntvnodescan_info {
        TAILQ_ENTRY(vmntvnodescan_info) entry;
        struct vnode *vp;
};
static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
           &vnlru_nowhere, 0,
           "Number of times the vnlru process ran without success");
static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;
static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
struct lwkt_token mntvnode_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);
/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
        lwkt_token_init(&mountlist_token);
        lwkt_token_init(&mntvnode_token);
        lwkt_token_init(&mntid_token);
        TAILQ_INIT(&mountscan_list);
        TAILQ_INIT(&mntvnodescan_list);
        mount_init(&dummymount);
        dummymount.mnt_flag |= MNT_RDONLY;
}
/*
 * Support function called with mntvnode_token held to remove a vnode
 * from the mountlist.  We must update any list scans which are in progress.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
        struct vmntvnodescan_info *info;

        TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
                if (info->vp == vp)
                        info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}
/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should setup the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
            struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        KKASSERT(mp != NULL);

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;

        /*
         * By default the vnode is assigned the mount point's normal
         * operations vector.
         */
        vp->v_ops = &mp->mnt_vn_use_ops;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}
/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
                struct vop_ops **ops,
                struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;
        vp->v_ops = ops;

        if (mp == NULL)
                mp = &dummymount;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}
/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
        int lkflags;

        if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
                if (flags & LK_NOWAIT)
                        return (ENOENT);
                /* XXX not MP safe */
                mp->mnt_kern_flag |= MNTK_MWAIT;
                /*
                 * Since all busy locks are shared except the exclusive
                 * lock granted when unmounting, the only place that a
                 * wakeup needs to be done is at the release of the
                 * exclusive lock at the end of dounmount.
                 */
                tsleep((caddr_t)mp, 0, "vfs_busy", 0);
                return (ENOENT);
        }
        lkflags = LK_SHARED;
        if (lockmgr(&mp->mnt_lock, lkflags))
                panic("vfs_busy: unexpected lock failure");
        return (0);
}
/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
        lockmgr(&mp->mnt_lock, LK_RELEASE);
}
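
/*
 * Illustrative sketch (not part of the original file): the typical
 * busy/unbusy bracket a caller would use to keep a filesystem from
 * being unmounted while examining it.  example_probe_mount() is a
 * hypothetical name.
 */
#if 0
static int
example_probe_mount(struct mount *mp)
{
        /* fail immediately rather than sleeping if an unmount is active */
        if (vfs_busy(mp, LK_NOWAIT) != 0)
                return (EBUSY);
        /* mp cannot be unmounted until the shared busy lock is released */
        kprintf("fs type: %s\n", mp->mnt_stat.f_fstypename);
        vfs_unbusy(mp);
        return (0);
}
#endif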
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
        struct vfsconf *vfsp;
        struct mount *mp;

        if (fstypename == NULL)
                return (ENODEV);

        vfsp = vfsconf_find_by_name(fstypename);
        if (vfsp == NULL)
                return (ENODEV);
        mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
        mount_init(mp);
        lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

        vfs_busy(mp, LK_NOWAIT);
        mp->mnt_vfc = vfsp;
        mp->mnt_op = vfsp->vfc_vfsops;
        vfsp->vfc_refcount++;
        mp->mnt_stat.f_type = vfsp->vfc_typenum;
        mp->mnt_flag |= MNT_RDONLY;
        mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
        strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
        copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
        *mpp = mp;
        return (0);
}
/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
        lockinit(&mp->mnt_lock, "vfslock", 0, 0);
        lwkt_token_init(&mp->mnt_token);

        TAILQ_INIT(&mp->mnt_nvnodelist);
        TAILQ_INIT(&mp->mnt_reservedvnlist);
        TAILQ_INIT(&mp->mnt_jlist);
        mp->mnt_nvnodelistsize = 0;
        mp->mnt_flag = 0;
        mp->mnt_iosize_max = DFLTPHYS;
}
/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
        struct mount *mp;
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mountlist_token);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
                    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
                        break;
                }
        }
        lwkt_reltoken(&ilock);
        return (mp);
}
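
/*
 * Illustrative sketch (not part of the original file): resolving a
 * user-supplied fsid to its mount point with vfs_getvfs(), as a
 * statfs-by-fsid style handler might.  example_statfs_by_fsid() is a
 * hypothetical name.
 */
#if 0
static int
example_statfs_by_fsid(fsid_t *fsid, struct statfs *sbp)
{
        struct mount *mp;

        if ((mp = vfs_getvfs(fsid)) == NULL)
                return (ENOENT);
        *sbp = mp->mnt_stat;    /* copy the cached statfs data */
        return (0);
}
#endif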
/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
        static u_int16_t mntid_base;
        lwkt_tokref ilock;
        fsid_t tfsid;
        int mtype;

        lwkt_gettoken(&ilock, &mntid_token);
        mtype = mp->mnt_vfc->vfc_typenum;
        tfsid.val[1] = mtype;
        mtype = (mtype & 0xFF) << 24;
        for (;;) {
                tfsid.val[0] = makeudev(255,
                    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
                mntid_base++;
                if (vfs_getvfs(&tfsid) == NULL)
                        break;
        }
        mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
        mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
        lwkt_reltoken(&ilock);
}
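
/*
 * Illustrative note (not part of the original file): the minor number
 * handed to makeudev() above packs the filesystem type into the top
 * byte and splits the 16-bit mntid_base across two byte lanes.  For
 * vfc_typenum 5 and mntid_base 0x1234:
 *
 *	(5 & 0xFF) << 24		-> 0x05000000
 *	(0x1234 & 0xFF00) << 8		-> 0x00120000
 *	(0x1234 & 0xFF)			-> 0x00000034
 *	combined			-> 0x05120034
 *
 * The low byte carries the low 8 bits of mntid_base, so val[0] stays
 * unique mod 2^16 for the first 2^8 ids, matching the comment above.
 */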
/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
        int didmunge = 0;

        bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
        for (;;) {
                if (vfs_getvfs(template) == NULL)
                        break;
                didmunge = 1;
                ++template->val[1];
        }
        mp->mnt_stat.f_fsid = *template;
        return (didmunge);
}
/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count, int pass)
{
        if (vp->v_flag & VRECLAIMED)
                return (0);
        if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
                return (0);
        if (sysref_isactive(&vp->v_sysref))
                return (0);
        if (vp->v_object && vp->v_object->resident_page_count >= page_count)
                return (0);

        /*
         * XXX horrible hack.  Up to four passes will be taken.  Each pass
         * makes a larger set of vnodes eligible.  For now what this really
         * means is that we try to recycle files opened only once before
         * recycling files opened multiple times.
         */
        switch(vp->v_flag & (VAGE0 | VAGE1)) {
        case 0:
                if (pass < 3)
                        return (0);
                break;
        case VAGE0:
                if (pass < 2)
                        return (0);
                break;
        case VAGE1:
                if (pass < 1)
                        return (0);
                break;
        case VAGE0 | VAGE1:
                break;
        }
        return (1);
}
/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories), just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
        struct namecache *ncp;

        spin_lock_wr(&vp->v_spinlock);
        TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
                if (!TAILQ_EMPTY(&ncp->nc_list)) {
                        spin_unlock_wr(&vp->v_spinlock);
                        return (0);
                }
        }
        spin_unlock_wr(&vp->v_spinlock);
        return (1);
}
/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.  Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
        if (vp->v_flag & VRECLAIMED)
                return (0);
        if (vp->v_sysref.refcnt > 1)
                return (0);
        if (vp->v_object && vp->v_object->resident_page_count >= page_count)
                return (0);
        if (vp->v_auxrefs && visleaf(vp)) {
                vinvalbuf(vp, V_SAVE, 0, 0);
#if 0   /* DEBUG */
                kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
                        "vrecycle: vp %p succeeded: %s\n"), vp,
                        (TAILQ_FIRST(&vp->v_namecache) ?
                         TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
        }

        /*
         * This sequence may seem a little strange, but we need to optimize
         * the critical path a bit.  We can't recycle vnodes with other
         * references and because we are trying to recycle an otherwise
         * perfectly fine vnode we have to invalidate the namecache in a
         * way that avoids possible deadlocks (since the vnode lock is being
         * held here).  Finally, we have to check for other references one
         * last time in case something snuck in during the inval.
         */
        if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
                return (0);
        if (cache_inval_vp_nonblock(vp))
                return (0);
        return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}
/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 *
 * NOTE: The 1/10 reclamation also ensures that the inactive data set
 *	 (the vnodes being recycled by the one-time use) does not degenerate
 *	 into too-small a set.  This is important because once a vnode is
 *	 marked as not being one-time-use (VAGE0/VAGE1 both 0) that vnode
 *	 will not be destroyed EXCEPT by this mechanism.  VM pages can still
 *	 be cleaned/freed by the pageout daemon.
 */
struct vnlru_info {
        int pass;
};

static int
vlrureclaim(struct mount *mp, void *data)
{
        struct vnlru_info *info = data;
        struct vnode *vp;
        lwkt_tokref ilock;
        int done;
        int trigger;
        int usevnodes;
        int count;
        int trigger_mult = vnlru_nowhere;

        /*
         * Calculate the trigger point for the resident pages check.  The
         * minimum trigger value is approximately the number of pages in
         * the system divided by the number of vnodes.  However, due to
         * various other system memory overheads unrelated to data caching
         * it is a good idea to double the trigger (at least).
         *
         * trigger_mult starts at 0.  If the recycler is having problems
         * finding enough freeable vnodes it will increase trigger_mult.
         * This should not happen in normal operation, even on machines with
         * low amounts of memory, but extraordinary memory use by the system
         * versus the amount of cached data can trigger it.
         */
        usevnodes = desiredvnodes;
        if (usevnodes <= 0)
                usevnodes = 1;
        trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

        done = 0;
        lwkt_gettoken(&ilock, &mntvnode_token);
        count = mp->mnt_nvnodelistsize / 10 + 1;

        while (count && mp->mnt_syncer) {
                /*
                 * Next vnode.  Use the special syncer vnode to placemark
                 * the LRU.  This way the LRU code does not interfere with
                 * vmntvnodescan operations.
                 */
                vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
                TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
                if (vp) {
                        TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
                                           mp->mnt_syncer, v_nmntvnodes);
                } else {
                        TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
                                          v_nmntvnodes);
                        vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
                }
                if (vp == NULL)
                        break;

                /*
                 * The VP will stick around while we hold mntvnode_token,
                 * at least until we block, so we can safely do an initial
                 * check, and then must check again after we lock the vnode.
                 */
                if (vp->v_type == VNON ||       /* syncer or indeterminate */
                    !vmightfree(vp, trigger, info->pass) /* critical path opt */
                ) {
                        --count;
                        continue;
                }

                /*
                 * VX get the candidate vnode.  If the VX get fails the
                 * vnode might still be on the mountlist.  Our loop depends
                 * on us at least cycling the vnode to the end of the
                 * mountlist.
                 */
                if (vx_get_nonblock(vp) != 0) {
                        --count;
                        continue;
                }

                /*
                 * Since we blocked locking the vp, make sure it is still
                 * a candidate for reclamation.  That is, it has not already
                 * been reclaimed and only has our VX reference associated
                 * with it.
                 */
                if (vp->v_type == VNON ||       /* syncer or indeterminate */
                    (vp->v_flag & VRECLAIMED) ||
                    vp->v_mount != mp ||
                    !vtrytomakegoneable(vp, trigger)    /* critical path opt */
                ) {
                        --count;
                        vx_put(vp);
                        continue;
                }

                /*
                 * All right, we are good, move the vp to the end of the
                 * mountlist and clean it out.  The vget will have returned
                 * an error if the vnode was destroyed (VRECLAIMED set), so we
                 * do not have to check again.  The vput() will move the
                 * vnode to the free list if the vgone() was successful.
                 */
                KKASSERT(vp->v_mount == mp);
                vgone_vxlocked(vp);
                vx_put(vp);
                ++done;
                --count;
        }
        lwkt_reltoken(&ilock);
        return (done);
}
/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
        if (vnlruproc_sig == 0) {
                vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
                wakeup(vnlruthread);
        }
        tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}

static void
vnlru_proc(void)
{
        struct thread *td = curthread;
        struct vnlru_info info;
        int done;

        EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
                              SHUTDOWN_PRI_FIRST);

        for (;;) {
                kproc_suspend_loop();

                /*
                 * Try to free some vnodes if we have too many
                 */
                if (numvnodes > desiredvnodes &&
                    freevnodes > desiredvnodes * 2 / 10) {
                        int count = numvnodes - desiredvnodes;

                        if (count > freevnodes / 100)
                                count = freevnodes / 100;
                        freesomevnodes(count);
                }

                /*
                 * Nothing to do if most of our vnodes are already on
                 * the free list.
                 */
                if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
                        vnlruproc_sig = 0;
                        wakeup(&vnlruproc_sig);
                        tsleep(td, 0, "vlruwt", hz);
                        continue;
                }

                /*
                 * The pass iterates through the four combinations of
                 * VAGE0/VAGE1.  We want to get rid of aged small files
                 * first.
                 */
                info.pass = 0;
                done = 0;
                while (done == 0 && info.pass < 4) {
                        done = mountlist_scan(vlrureclaim, &info,
                                              MNTSCAN_FORWARD);
                        ++info.pass;
                }

                /*
                 * The vlrureclaim() call only processes 1/10 of the vnodes
                 * on each mount.  If we couldn't find any repeat the loop
                 * at least enough times to cover all available vnodes before
                 * we start sleeping.  Complain if the failure extends past
                 * 30 seconds, every 30 seconds.
                 */
                if (done == 0) {
                        ++vnlru_nowhere;
                        if (vnlru_nowhere % 10 == 0)
                                tsleep(td, 0, "vlrup", hz * 3);
                        if (vnlru_nowhere % 100 == 0)
                                kprintf("vnlru_proc: vnode recycler stopped working!\n");
                        if (vnlru_nowhere == 1000)
                                vnlru_nowhere = 900;
                } else {
                        vnlru_nowhere = 0;
                }
        }
}
/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mountlist_token);
        if (how == MNTINS_FIRST)
                TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
        else
                TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
        lwkt_reltoken(&ilock);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
        lwkt_tokref ilock;
        int error;

        lwkt_gettoken(&ilock, &mountlist_token);
        error = callback(mp);
        lwkt_reltoken(&ilock);
        return (error);
}

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
        return (TAILQ_FIRST(&mountlist));
}
/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
        struct mountscan_info *msi;
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mountlist_token);
        TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
                if (msi->msi_node == mp) {
                        if (msi->msi_how & MNTSCAN_FORWARD)
                                msi->msi_node = TAILQ_NEXT(mp, mnt_list);
                        else
                                msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
                }
        }
        TAILQ_REMOVE(&mountlist, mp, mnt_list);
        lwkt_reltoken(&ilock);
}
/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
        struct mountscan_info info;
        lwkt_tokref ilock;
        struct mount *mp;
        int count;
        int res;

        lwkt_gettoken(&ilock, &mountlist_token);

        info.msi_how = how;
        info.msi_node = NULL;   /* paranoia */
        TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

        res = 0;

        if (how & MNTSCAN_FORWARD) {
                info.msi_node = TAILQ_FIRST(&mountlist);
                while ((mp = info.msi_node) != NULL) {
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_NEXT(mp, mnt_list);
                }
        } else if (how & MNTSCAN_REVERSE) {
                info.msi_node = TAILQ_LAST(&mountlist, mntlist);
                while ((mp = info.msi_node) != NULL) {
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
                }
        }
        TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
        lwkt_reltoken(&ilock);
        return (res);
}
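
/*
 * Illustrative sketch (not part of the original file): a minimal
 * mountlist_scan() consumer that counts mounted filesystems.  The
 * callback runs with each mount busied; non-negative return values
 * aggregate into mountlist_scan()'s result.  The example_* names are
 * hypothetical.
 */
#if 0
static int
example_count_cb(struct mount *mp, void *data)
{
        return (1);             /* aggregate: one per mount */
}

static int
example_count_mounts(void)
{
        return (mountlist_scan(example_count_cb, NULL, MNTSCAN_FORWARD));
}
#endif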
/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
        "vnlru",
        vnlru_proc,
        &vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mntvnode_token);
        /*
         * Delete from old mount point vnode list, if on one.
         */
        if (vp->v_mount != NULL) {
                KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
                        ("bad mount point vnode list size"));
                vremovevnodemnt(vp);
                vp->v_mount->mnt_nvnodelistsize--;
        }
        /*
         * Insert into list of vnodes for the new mount point, if available.
         * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
         */
        if ((vp->v_mount = mp) == NULL) {
                lwkt_reltoken(&ilock);
                return;
        }
        if (mp->mnt_syncer) {
                TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
        } else {
                TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
        }
        mp->mnt_nvnodelistsize++;
        lwkt_reltoken(&ilock);
}
/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(struct mount *mp, int flags,
              int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
              int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
              void *data)
{
        struct vmntvnodescan_info info;
        lwkt_tokref ilock;
        struct vnode *vp;
        int r = 0;
        int maxcount = 1000000;
        int stopcount = 0;
        int count = 0;

        lwkt_gettoken(&ilock, &mntvnode_token);

        /*
         * If asked to do one pass stop after iterating available vnodes.
         * Under heavy loads new vnodes can be added while we are scanning,
         * so this isn't perfect.  Create a slop factor of 2x.
         */
        if (flags & VMSC_ONEPASS)
                stopcount = mp->mnt_nvnodelistsize * 2;

        info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
        TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
        while ((vp = info.vp) != NULL) {
                if (--maxcount == 0)
                        panic("maxcount reached during vmntvnodescan");

                /*
                 * Skip if visible but not ready, or special (e.g.
                 * mp->mnt_syncer)
                 */
                if (vp->v_type == VNON)
                        goto next;
                KKASSERT(vp->v_mount == mp);

                /*
                 * Quick test.  A negative return continues the loop without
                 * calling the slow test.  0 continues onto the slow test.
                 * A positive number aborts the loop.
                 */
                if (fastfunc) {
                        if ((r = fastfunc(mp, vp, data)) < 0) {
                                r = 0;
                                goto next;
                        }
                        if (r)
                                break;
                }

                /*
                 * Get a vxlock on the vnode, retry if it has moved or isn't
                 * in the mountlist where we expect it.
                 */
                if (slowfunc) {
                        int error;

                        switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
                        case VMSC_GETVP:
                                error = vget(vp, LK_EXCLUSIVE);
                                break;
                        case VMSC_GETVP|VMSC_NOWAIT:
                                error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
                                break;
                        case VMSC_GETVX:
                                vx_get(vp);
                                error = 0;
                                break;
                        default:
                                error = 0;
                                break;
                        }
                        if (error)
                                goto next;

                        /*
                         * Do not call the slow function if the vnode is
                         * invalid or if it was ripped out from under us
                         * while we (potentially) blocked.
                         */
                        if (info.vp == vp && vp->v_type != VNON)
                                r = slowfunc(mp, vp, data);

                        /*
                         * Cleanup
                         */
                        switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
                        case VMSC_GETVP:
                        case VMSC_GETVP|VMSC_NOWAIT:
                                vput(vp);
                                break;
                        case VMSC_GETVX:
                                vx_put(vp);
                                break;
                        default:
                                break;
                        }
                        if (r != 0)
                                break;
                }

next:
                /*
                 * Yield after some processing.  Depending on the number
                 * of vnodes, we might wind up running for a long time.
                 * Because threads are not preemptable, time critical
                 * userland processes might starve.  Give them a chance
                 * now and then.
                 */
                if (++count == 10000) {
                        /* We really want to yield a bit, so we simply sleep a tick */
                        tsleep(mp, 0, "vnodescn", 1);
                        count = 0;
                }

                /*
                 * If doing one pass this decrements to zero.  If it starts
                 * at zero it is effectively unlimited for the purposes of
                 * this loop.
                 */
                if (--stopcount == 0)
                        break;

                /*
                 * Iterate.  If the vnode was ripped out from under us
                 * info.vp will already point to the next vnode, otherwise
                 * we have to obtain the next valid vnode ourselves.
                 */
                if (info.vp == vp)
                        info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
        lwkt_reltoken(&ilock);
        return (r);
}
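
/*
 * Illustrative sketch (not part of the original file): a minimal
 * vmntvnodescan() consumer pairing a non-blocking fastfunc filter with
 * a VX-locked slowfunc worker.  The example_* names are hypothetical.
 */
#if 0
static int
example_fast(struct mount *mp, struct vnode *vp, void *data)
{
        if (vp->v_type != VREG)
                return (-1);    /* skip without calling the slowfunc */
        return (0);             /* proceed to the locked slow test */
}

static int
example_slow(struct mount *mp, struct vnode *vp, void *data)
{
        /* vp is VX locked here; flush its dirty buffers */
        vinvalbuf(vp, V_SAVE, 0, 0);
        return (0);             /* continue the scan */
}

static int
example_flush_regular_files(struct mount *mp)
{
        return (vmntvnodescan(mp, VMSC_GETVX, example_fast, example_slow,
                              NULL));
}
#endif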
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem. The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value. On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
        int flags;
        int busy;
        struct thread *td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
        struct thread *td = curthread;	/* XXX */
        struct vnode *rootvp = NULL;
        int error;
        struct vflush_info vflush_info;

        if (rootrefs > 0) {
                KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
                        ("vflush: bad args"));
                /*
                 * Get the filesystem root vnode. We can vput() it
                 * immediately, since with rootrefs > 0, it won't go away.
                 */
                if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
                        if ((flags & FORCECLOSE) == 0)
                                return (error);
                        rootrefs = 0;
                        /* continue anyway */
                } else {
                        vput(rootvp);
                }
        }

        vflush_info.busy = 0;
        vflush_info.flags = flags;
        vflush_info.td = td;
        vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

        if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
                /*
                 * If just the root vnode is busy, and if its refcount
                 * is equal to `rootrefs', then go ahead and kill it.
                 */
                KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
                KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
                if (vflush_info.busy == 1 &&
                    rootvp->v_sysref.refcnt == rootrefs) {
                        vx_lock(rootvp);
                        vgone_vxlocked(rootvp);
                        vx_unlock(rootvp);
                        vflush_info.busy = 0;
                }
        }
        if (vflush_info.busy)
                return (EBUSY);
        for (; rootrefs > 0; rootrefs--)
                vrele(rootvp);
        return (0);
}
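
/*
 * Illustrative sketch (not part of the original file): the shape of a
 * filesystem unmount method using vflush().  example_unmount() and the
 * single base rootref are hypothetical; real VFS_UNMOUNT
 * implementations differ per filesystem.
 */
#if 0
static int
example_unmount(struct mount *mp, int mntflags)
{
        int flags = 0;

        if (mntflags & MNT_FORCE)
                flags |= FORCECLOSE;
        /* assume one base ref held on the root vnode by this filesystem */
        return (vflush(mp, 1, flags));
}
#endif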
/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
        struct vflush_info *info = data;
        struct vattr vattr;

        /*
         * Skip over vnodes marked VSYSTEM.
         */
        if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
                return (0);
        }

        /*
         * If WRITECLOSE is set, flush out unlinked but still open
         * files (even if open only for reading) and regular file
         * vnodes open for writing.
         */
        if ((info->flags & WRITECLOSE) &&
            (vp->v_type == VNON ||
            (VOP_GETATTR(vp, &vattr) == 0 &&
             vattr.va_nlink > 0)) &&
            (vp->v_writecount == 0 || vp->v_type != VREG)) {
                return (0);
        }

        /*
         * If we are the only holder (refcnt of 1) or the vnode is in
         * termination (refcnt < 0), we can vgone the vnode.
         */
        if (vp->v_sysref.refcnt <= 1) {
                vgone_vxlocked(vp);
                return (0);
        }

        /*
         * If FORCECLOSE is set, forcibly close the vnode. For block
         * or character devices we just clean and leave the vp
         * associated with devfs.  For all other files, just kill them.
         *
         * XXX we need to do something about devfs here, I'd rather not
         *     blow away device associations.
         */
        if (info->flags & FORCECLOSE) {
                if (vp->v_type != VBLK && vp->v_type != VCHR) {
                        vgone_vxlocked(vp);
                } else {
                        vclean_vxlocked(vp, 0);
                        /*vp->v_ops = &devfs_vnode_dev_vops_p;*/
                        insmntque(vp, NULL);
                }
                return (0);
        }
#ifdef DIAGNOSTIC
        if (busyprt)
                vprint("vflush: busy vnode", vp);
#endif
        ++info->busy;
        return (0);
}
/*
 * Add/remove a bio_ops structure (e.g. softupdates) to/from the
 * global list.
 */
void
add_bio_ops(struct bio_ops *ops)
{
        TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
        TAILQ_REMOVE(&bio_ops_list, ops, entry);
}
/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
        struct bio_ops *ops;

        if (mp) {
                if ((ops = mp->mnt_bioops) != NULL)
                        ops->io_sync(mp);
        } else {
                TAILQ_FOREACH(ops, &bio_ops_list, entry) {
                        ops->io_sync(NULL);
                }
        }
}
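
/*
 * Illustrative sketch (not part of the original file): how a subsystem
 * such as softupdates might register its bio_ops so bio_ops_sync() can
 * reach it.  The example_* names are hypothetical, and the bio_ops
 * structure is assumed to carry the io_sync hook used above.
 */
#if 0
static void
example_io_sync(struct mount *mp)
{
        /* flush subsystem-private dependency state for mp (all if NULL) */
}

static struct bio_ops example_bioops = {
        .io_sync = example_io_sync,
};

static void
example_register(void)
{
        add_bio_ops(&example_bioops);
}
#endif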