/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.37 2008/09/17 21:44:18 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/thread2.h>
#include <sys/sysref2.h>
#include <sys/mplock2.h>

#include <vm/vm_object.h>
struct mountscan_info {
        TAILQ_ENTRY(mountscan_info) msi_entry;
        int msi_how;
        struct mount *msi_node;
};

struct vmntvnodescan_info {
        TAILQ_ENTRY(vmntvnodescan_info) entry;
        struct vnode *vp;
};

struct vnlru_info {
        int pass;
};
static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
           &vnlru_nowhere, 0,
           "Number of times the vnlru process ran without success");
static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;
static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
struct lwkt_token mntvnode_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);
/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
        lwkt_token_init(&mountlist_token, 1, "mntlist");
        lwkt_token_init(&mntvnode_token, 1, "mntvnode");
        lwkt_token_init(&mntid_token, 1, "mntid");
        TAILQ_INIT(&mountscan_list);
        TAILQ_INIT(&mntvnodescan_list);
        mount_init(&dummymount);
        dummymount.mnt_flag |= MNT_RDONLY;
}
/*
 * Support function called with mntvnode_token held to remove a vnode
 * from the mountlist.  We must update any list scans which are in progress.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
        struct vmntvnodescan_info *info;

        TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
                if (info->vp == vp)
                        info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}
/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should setup the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
            struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        KKASSERT(mp != NULL);

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;

        /*
         * By default the vnode is assigned the mount point's normal
         * operations vector.
         */
        vp->v_ops = &mp->mnt_vn_use_ops;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}
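
/*
 * Example: a filesystem's inode-to-vnode path might call getnewvnode()
 * roughly as follows.  This is an illustrative sketch only; the VT_UFS
 * tag is just one possibility and "my_inode" is hypothetical:
 *
 *	struct vnode *vp;
 *
 *	if (getnewvnode(VT_UFS, mp, &vp, 0, 0) == 0) {
 *		vp->v_data = my_inode;		// hook up fs-private data
 *		vp->v_type = VREG;		// leaving VNON hides the vnode
 *		vx_unlock(vp);			// keep a vref for the caller
 *	}
 */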
/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
                struct vop_ops **ops,
                struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;
        vp->v_ops = ops;

        if (mp == NULL)
                mp = &dummymount;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}
/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
        int lkflags;

        if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
                if (flags & LK_NOWAIT)
                        return (ENOENT);
                /* XXX not MP safe */
                mp->mnt_kern_flag |= MNTK_MWAIT;
                /*
                 * Since all busy locks are shared except the exclusive
                 * lock granted when unmounting, the only place that a
                 * wakeup needs to be done is at the release of the
                 * exclusive lock at the end of dounmount.
                 */
                tsleep((caddr_t)mp, 0, "vfs_busy", 0);
                return (ENOENT);
        }
        lkflags = LK_SHARED;
        if (lockmgr(&mp->mnt_lock, lkflags))
                panic("vfs_busy: unexpected lock failure");
        return (0);
}
/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
        lockmgr(&mp->mnt_lock, LK_RELEASE);
}
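
/*
 * Example: code that must prevent an unmount while it operates on a
 * mount point brackets the work with vfs_busy()/vfs_unbusy().  Sketch
 * only; do_work() is hypothetical:
 *
 *	if (vfs_busy(mp, LK_NOWAIT) == 0) {
 *		error = do_work(mp);	// mp cannot be unmounted here
 *		vfs_unbusy(mp);
 *	}
 */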
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
        struct vfsconf *vfsp;
        struct mount *mp;

        if (fstypename == NULL)
                return (ENODEV);

        vfsp = vfsconf_find_by_name(fstypename);
        if (vfsp == NULL)
                return (ENODEV);

        mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
        mount_init(mp);
        lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

        vfs_busy(mp, LK_NOWAIT);
        mp->mnt_vfc = vfsp;
        mp->mnt_op = vfsp->vfc_vfsops;
        vfsp->vfc_refcount++;
        mp->mnt_stat.f_type = vfsp->vfc_typenum;
        mp->mnt_flag |= MNT_RDONLY;
        mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
        strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
        copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
        *mpp = mp;
        return (0);
}
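
/*
 * Example: early root-mount code might use this as follows (sketch;
 * the device name is illustrative and is normally rewritten by
 * mount(8) after booting):
 *
 *	struct mount *mp;
 *
 *	if (vfs_rootmountalloc("ufs", "root_device", &mp) == 0) {
 *		// mp is returned busied and flagged MNT_RDONLY
 *	}
 */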
/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
        lockinit(&mp->mnt_lock, "vfslock", 0, 0);
        lwkt_token_init(&mp->mnt_token, 1, "permnt");

        TAILQ_INIT(&mp->mnt_nvnodelist);
        TAILQ_INIT(&mp->mnt_reservedvnlist);
        TAILQ_INIT(&mp->mnt_jlist);
        mp->mnt_nvnodelistsize = 0;
        mp->mnt_flag = 0;
        mp->mnt_iosize_max = DFLTPHYS;
}
/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
        struct mount *mp;

        lwkt_gettoken(&mountlist_token);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
                    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
                        break;
                }
        }
        lwkt_reltoken(&mountlist_token);
        return (mp);
}
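
/*
 * Example: recovering the mount point from an fsid previously reported
 * by statfs (sketch; 'sb' is a hypothetical struct statfs copy):
 *
 *	struct mount *mp;
 *
 *	mp = vfs_getvfs(&sb.f_fsid);
 *	if (mp != NULL) {
 *		// both fsid words matched an entry on the mountlist
 *	}
 */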
/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
        static u_int16_t mntid_base;
        fsid_t tfsid;
        int mtype;

        lwkt_gettoken(&mntid_token);
        mtype = mp->mnt_vfc->vfc_typenum;
        tfsid.val[1] = mtype;
        mtype = (mtype & 0xFF) << 24;
        for (;;) {
                tfsid.val[0] = makeudev(255,
                    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
                mntid_base++;
                if (vfs_getvfs(&tfsid) == NULL)
                        break;
        }
        mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
        mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
        lwkt_reltoken(&mntid_token);
}
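
/*
 * Example: for filesystem type number 5 and mntid_base 0x1234, the minor
 * number passed to makeudev() above works out to
 *
 *	(0x05 << 24) | ((0x1234 & 0xFF00) << 8) | (0x1234 & 0xFF)
 *	    = 0x05000000 | 0x00120000 | 0x00000034
 *	    = 0x05120034
 *
 * The low byte of mntid_base lands in the low 16 bits (unique mod 2^16
 * for the first 2^8 calls) while its high byte lands in bits 16-23,
 * keeping the full val[0] unique for the first 2^16 calls.
 */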
/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
        int didset = 0;

        bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
        if (vfs_getvfs(template) == NULL) {
                didset = 0;
        } else {
                /* collision, adjust val[1] until the fsid is unique */
                do {
                        ++template->val[1];
                        didset = 1;
                } while (vfs_getvfs(template) != NULL);
        }
        mp->mnt_stat.f_fsid = *template;
        return (didset);
}
/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count, int pass)
{
        if (vp->v_flag & VRECLAIMED)
                return (0);
#if 0
        if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
                return (0);
#endif
        if (sysref_isactive(&vp->v_sysref))
                return (0);
        if (vp->v_object && vp->v_object->resident_page_count >= page_count)
                return (0);

        /*
         * XXX horrible hack.  Up to four passes will be taken.  Each pass
         * makes a larger set of vnodes eligible.  For now what this really
         * means is that we try to recycle files opened only once before
         * recycling files opened multiple times.
         */
        switch(vp->v_flag & (VAGE0 | VAGE1)) {
        case 0:
                if (pass < 3)
                        return (0);
                break;
        case VAGE0:
                if (pass < 2)
                        return (0);
                break;
        case VAGE1:
                if (pass < 1)
                        return (0);
                break;
        case VAGE0 | VAGE1:
                break;
        }
        return (1);
}
/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories), just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
        struct namecache *ncp;

        spin_lock(&vp->v_spinlock);
        TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
                if (!TAILQ_EMPTY(&ncp->nc_list)) {
                        spin_unlock(&vp->v_spinlock);
                        return (0);
                }
        }
        spin_unlock(&vp->v_spinlock);
        return (1);
}
/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.  Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
        if (vp->v_flag & VRECLAIMED)
                return (0);
        if (vp->v_sysref.refcnt > 1)
                return (0);
        if (vp->v_object && vp->v_object->resident_page_count >= page_count)
                return (0);
        if (vp->v_auxrefs && visleaf(vp)) {
                vinvalbuf(vp, V_SAVE, 0, 0);
#if 0	/* DEBUG */
                kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
                        "vrecycle: vp %p succeeded: %s\n"), vp,
                        (TAILQ_FIRST(&vp->v_namecache) ?
                         TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
        }

        /*
         * This sequence may seem a little strange, but we need to optimize
         * the critical path a bit.  We can't recycle vnodes with other
         * references and because we are trying to recycle an otherwise
         * perfectly fine vnode we have to invalidate the namecache in a
         * way that avoids possible deadlocks (since the vnode lock is being
         * held here).  Finally, we have to check for other references one
         * last time in case something snuck in during the inval.
         */
        if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
                return (0);
        if (cache_inval_vp_nonblock(vp))
                return (0);
        return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}
/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 *
 * NOTE: The 1/10 reclamation also ensures that the inactive data set
 *	 (the vnodes being recycled by the one-time use) does not degenerate
 *	 into too-small a set.  This is important because once a vnode is
 *	 marked as not being one-time-use (VAGE0/VAGE1 both 0) that vnode
 *	 will not be destroyed EXCEPT by this mechanism.  VM pages can still
 *	 be cleaned/freed by the pageout daemon.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
        struct vnlru_info *info = data;
        struct vnode *vp;
        int done;
        int trigger;
        int usevnodes;
        int count;
        int trigger_mult = vnlru_nowhere;

        /*
         * Calculate the trigger point for the resident pages check.  The
         * minimum trigger value is approximately the number of pages in
         * the system divided by the number of vnodes.  However, due to
         * various other system memory overheads unrelated to data caching
         * it is a good idea to double the trigger (at least).
         *
         * trigger_mult starts at 0.  If the recycler is having problems
         * finding enough freeable vnodes it will increase trigger_mult.
         * This should not happen in normal operation, even on machines with
         * low amounts of memory, but extraordinary memory use by the system
         * versus the amount of cached data can trigger it.
         */
        usevnodes = desiredvnodes;
        if (usevnodes <= 0)
                usevnodes = 1;
        trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

        done = 0;
        lwkt_gettoken(&mntvnode_token);
        count = mp->mnt_nvnodelistsize / 10 + 1;

        while (count && mp->mnt_syncer) {
                /*
                 * Next vnode.  Use the special syncer vnode to placemark
                 * the LRU.  This way the LRU code does not interfere with
                 * vmntvnodescan().
                 */
                vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
                TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
                if (vp) {
                        TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
                                           mp->mnt_syncer, v_nmntvnodes);
                } else {
                        TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
                                          v_nmntvnodes);
                        vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
                }
                if (vp == NULL)
                        break;

                /*
                 * The VP will stick around while we hold mntvnode_token,
                 * at least until we block, so we can safely do an initial
                 * check, and then must check again after we lock the vnode.
                 */
                if (vp->v_type == VNON ||	/* syncer or indeterminant */
                    !vmightfree(vp, trigger, info->pass) /* critical path opt */
                ) {
                        --count;
                        continue;
                }

                /*
                 * VX get the candidate vnode.  If the VX get fails the
                 * vnode might still be on the mountlist.  Our loop depends
                 * on us at least cycling the vnode to the end of the
                 * mountlist.
                 */
                if (vx_get_nonblock(vp) != 0) {
                        --count;
                        continue;
                }

                /*
                 * Since we blocked locking the vp, make sure it is still
                 * a candidate for reclamation.  That is, it has not already
                 * been reclaimed and only has our VX reference associated
                 * with it.
                 */
                if (vp->v_type == VNON ||	/* syncer or indeterminant */
                    (vp->v_flag & VRECLAIMED) ||
                    vp->v_mount != mp ||
                    !vtrytomakegoneable(vp, trigger)	/* critical path opt */
                ) {
                        --count;
                        vx_put(vp);
                        continue;
                }

                /*
                 * All right, we are good, move the vp to the end of the
                 * mountlist and clean it out.  The vget will have returned
                 * an error if the vnode was destroyed (VRECLAIMED set), so we
                 * do not have to check again.  The vput() will move the
                 * vnode to the free list if the vgone() was successful.
                 */
                KKASSERT(vp->v_mount == mp);
                vgone_vxlocked(vp);
                vx_put(vp);
                ++done;
                --count;
        }
        lwkt_reltoken(&mntvnode_token);
        return (done);
}
/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
        if (vnlruproc_sig == 0) {
                vnlruproc_sig = 1;	/* avoid unnecessary wakeups */
                wakeup(vnlruthread);
        }
        tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}

static void
vnlru_proc(void)
{
        struct thread *td = curthread;
        struct vnlru_info info;
        int done;

        EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
                              SHUTDOWN_PRI_FIRST);

        get_mplock();
        crit_enter();

        for (;;) {
                kproc_suspend_loop();

                /*
                 * Try to free some vnodes if we have too many
                 */
                if (numvnodes > desiredvnodes &&
                    freevnodes > desiredvnodes * 2 / 10) {
                        int count = numvnodes - desiredvnodes;

                        if (count > freevnodes / 100)
                                count = freevnodes / 100;
                        if (count < 5)
                                count = 5;
                        freesomevnodes(count);
                }

                /*
                 * Nothing to do if most of our vnodes are already on
                 * the free list.
                 */
                if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
                        vnlruproc_sig = 0;
                        wakeup(&vnlruproc_sig);
                        tsleep(td, 0, "vlruwt", hz);
                        continue;
                }

                /*
                 * The pass iterates through the four combinations of
                 * VAGE0/VAGE1.  We want to get rid of aged small files
                 * first.
                 */
                info.pass = 0;
                done = 0;
                while (done == 0 && info.pass < 4) {
                        done = mountlist_scan(vlrureclaim, &info,
                                              MNTSCAN_FORWARD);
                        ++info.pass;
                }

                /*
                 * The vlrureclaim() call only processes 1/10 of the vnodes
                 * on each mount.  If we couldn't find any repeat the loop
                 * at least enough times to cover all available vnodes before
                 * we start sleeping.  Complain if the failure extends past
                 * 30 seconds, every 30 seconds.
                 */
                if (done == 0) {
                        ++vnlru_nowhere;
                        if (vnlru_nowhere % 10 == 0)
                                tsleep(td, 0, "vlrup", hz * 3);
                        if (vnlru_nowhere % 100 == 0)
                                kprintf("vnlru_proc: vnode recycler stopped working!\n");
                        if (vnlru_nowhere == 1000)
                                vnlru_nowhere = 900;
                } else {
                        vnlru_nowhere = 0;
                }
        }
}
/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
        lwkt_gettoken(&mountlist_token);
        if (how == MNTINS_FIRST)
                TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
        else
                TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
        lwkt_reltoken(&mountlist_token);
}
/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
        int error;

        lwkt_gettoken(&mountlist_token);
        error = callback(mp);
        lwkt_reltoken(&mountlist_token);
        return (error);
}
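
/*
 * Example: a callback run serialized under the mountlist token via
 * mountlist_interlock() (sketch; the callback is hypothetical):
 *
 *	static int
 *	unmount_interlock_cb(struct mount *mp)
 *	{
 *		if (mp->mnt_kern_flag & MNTK_UNMOUNT)
 *			return (EBUSY);
 *		return (0);
 *	}
 *
 *	error = mountlist_interlock(unmount_interlock_cb, mp);
 */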
/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
        return (TAILQ_FIRST(&mountlist));
}
/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
        struct mountscan_info *msi;

        lwkt_gettoken(&mountlist_token);
        TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
                if (msi->msi_node == mp) {
                        if (msi->msi_how & MNTSCAN_FORWARD)
                                msi->msi_node = TAILQ_NEXT(mp, mnt_list);
                        else
                                msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
                }
        }
        TAILQ_REMOVE(&mountlist, mp, mnt_list);
        lwkt_reltoken(&mountlist_token);
}
/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
        struct mountscan_info info;
        struct mount *mp;
        int count;
        int res;

        lwkt_gettoken(&mountlist_token);

        info.msi_how = how;
        info.msi_node = NULL;	/* paranoia */
        TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

        res = 0;

        if (how & MNTSCAN_FORWARD) {
                info.msi_node = TAILQ_FIRST(&mountlist);
                while ((mp = info.msi_node) != NULL) {
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_NEXT(mp, mnt_list);
                }
        } else if (how & MNTSCAN_REVERSE) {
                info.msi_node = TAILQ_LAST(&mountlist, mntlist);
                while ((mp = info.msi_node) != NULL) {
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
                }
        }
        TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
        lwkt_reltoken(&mountlist_token);
        return (res);
}
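
/*
 * Example: counting mounted filesystems with a forward scan (sketch;
 * the callback is hypothetical):
 *
 *	static int
 *	count_mounts_cb(struct mount *mp, void *data)
 *	{
 *		++*(int *)data;
 *		return (0);	// >= 0 is aggregated, < 0 stops the scan
 *	}
 *
 *	int n = 0;
 *	mountlist_scan(count_mounts_cb, &n, MNTSCAN_FORWARD);
 */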
/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
        "vnlru",
        vnlru_proc,
        &vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
        lwkt_gettoken(&mntvnode_token);
        /*
         * Delete from old mount point vnode list, if on one.
         */
        if (vp->v_mount != NULL) {
                KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
                        ("bad mount point vnode list size"));
                vremovevnodemnt(vp);
                vp->v_mount->mnt_nvnodelistsize--;
        }
        /*
         * Insert into list of vnodes for the new mount point, if available.
         * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
         */
        if ((vp->v_mount = mp) == NULL) {
                lwkt_reltoken(&mntvnode_token);
                return;
        }
        if (mp->mnt_syncer) {
                TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
        } else {
                TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
        }
        mp->mnt_nvnodelistsize++;
        lwkt_reltoken(&mntvnode_token);
}
/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 *
 * NOTE: We hold vmobj_token to prevent a VM object from being destroyed
 *	 out from under the fastfunc()'s vnode test.  It will not prevent
 *	 v_object from getting NULL'd out but it will ensure that the
 *	 pointer (if we race) will remain stable.
 */
int
vmntvnodescan(struct mount *mp, int flags,
              int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
              int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
              void *data)
{
        struct vmntvnodescan_info info;
        struct vnode *vp;
        int r = 0;
        int maxcount = 1000000;
        int stopcount = 0;
        int count = 0;
        int error;

        lwkt_gettoken(&mntvnode_token);
        lwkt_gettoken(&vmobj_token);

        /*
         * If asked to do one pass stop after iterating available vnodes.
         * Under heavy loads new vnodes can be added while we are scanning,
         * so this isn't perfect.  Create a slop factor of 2x.
         */
        if (flags & VMSC_ONEPASS)
                stopcount = mp->mnt_nvnodelistsize * 2;

        info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
        TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
        while ((vp = info.vp) != NULL) {
                if (--maxcount == 0)
                        panic("maxcount reached during vmntvnodescan");

                /*
                 * Skip if visible but not ready, or special (e.g.
                 * mp->mnt_syncer)
                 */
                if (vp->v_type == VNON)
                        goto next;
                KKASSERT(vp->v_mount == mp);

                /*
                 * Quick test.  A negative return continues the loop without
                 * calling the slow test.  0 continues onto the slow test.
                 * A positive number aborts the loop.
                 */
                if (fastfunc) {
                        if ((r = fastfunc(mp, vp, data)) < 0) {
                                r = 0;
                                goto next;
                        }
                        if (r)
                                break;
                }

                /*
                 * Get a vxlock on the vnode, retry if it has moved or isn't
                 * in the mountlist where we expect it.
                 */
                if (slowfunc) {
                        error = 0;
                        switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
                        case VMSC_GETVP:
                                error = vget(vp, LK_EXCLUSIVE);
                                break;
                        case VMSC_GETVP|VMSC_NOWAIT:
                                error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
                                break;
                        case VMSC_GETVX:
                                vx_get(vp);
                                break;
                        default:
                                break;
                        }
                        if (error)
                                goto next;

                        /*
                         * Do not call the slow function if the vnode is
                         * invalid or if it was ripped out from under us
                         * while we (potentially) blocked.
                         */
                        if (info.vp == vp && vp->v_type != VNON)
                                r = slowfunc(mp, vp, data);

                        /*
                         * Cleanup.  Unlock/release the vnode.
                         */
                        switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
                        case VMSC_GETVP:
                        case VMSC_GETVP|VMSC_NOWAIT:
                                vput(vp);
                                break;
                        case VMSC_GETVX:
                                vx_put(vp);
                                break;
                        default:
                                break;
                        }
                        if (r != 0)
                                break;
                }

next:
                /*
                 * Yield after some processing.  Depending on the number
                 * of vnodes, we might wind up running for a long time.
                 * Because threads are not preemptable, time critical
                 * userland processes might starve.  Give them a chance
                 * now and then.
                 */
                if (++count == 10000) {
                        /* We really want to yield a bit, so we simply sleep a tick */
                        tsleep(mp, 0, "vnodescn", 1);
                        count = 0;
                }

                /*
                 * If doing one pass this decrements to zero.  If it starts
                 * at zero it is effectively unlimited for the purposes of
                 * this loop.
                 */
                if (--stopcount == 0)
                        break;

                /*
                 * Iterate.  If the vnode was ripped out from under us
                 * info.vp will already point to the next vnode, otherwise
                 * we have to obtain the next valid vnode ourselves.
                 */
                if (info.vp == vp)
                        info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
        lwkt_reltoken(&vmobj_token);
        lwkt_reltoken(&mntvnode_token);
        return (r);
}
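
/*
 * Example: a two-stage scan which filters cheaply in the fast pass and
 * flushes each surviving vnode under a real lock in the slow pass
 * (sketch; both callbacks are hypothetical):
 *
 *	static int
 *	scan_fast(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		if (vp->v_object == NULL)
 *			return (-1);	// skip, slowfunc not called
 *		return (0);		// fall through to slowfunc
 *	}
 *
 *	static int
 *	scan_slow(struct mount *mp, struct vnode *vp, void *data)
 *	{
 *		vinvalbuf(vp, V_SAVE, 0, 0);	// flush dirty buffers
 *		return (0);			// non-zero ends the scan
 *	}
 *
 *	vmntvnodescan(mp, VMSC_GETVP, scan_fast, scan_slow, NULL);
 */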
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif
static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
        int flags;
        int busy;
        struct thread *td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
        struct thread *td = curthread;	/* XXX */
        struct vnode *rootvp = NULL;
        int error;
        struct vflush_info vflush_info;

        if (rootrefs > 0) {
                KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
                        ("vflush: bad args"));
                /*
                 * Get the filesystem root vnode.  We can vput() it
                 * immediately, since with rootrefs > 0, it won't go away.
                 */
                if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
                        if ((flags & FORCECLOSE) == 0)
                                return (error);
                        rootrefs = 0;
                        /* continue anyway */
                }
                if (rootrefs)
                        vput(rootvp);
        }

        vflush_info.busy = 0;
        vflush_info.flags = flags;
        vflush_info.td = td;
        vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

        if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
                /*
                 * If just the root vnode is busy, and if its refcount
                 * is equal to `rootrefs', then go ahead and kill it.
                 */
                KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
                KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
                if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
                        vx_lock(rootvp);
                        vgone_vxlocked(rootvp);
                        vx_unlock(rootvp);
                        vflush_info.busy = 0;
                }
        }
        if (vflush_info.busy)
                return (EBUSY);
        for (; rootrefs > 0; rootrefs--)
                vrele(rootvp);
        return (0);
}
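
/*
 * Example: a filesystem's VFS_UNMOUNT code typically ends up here
 * (sketch):
 *
 *	if (mntflags & MNT_FORCE)
 *		error = vflush(mp, 0, FORCECLOSE);	// kill active vnodes
 *	else
 *		error = vflush(mp, 1, 0);	// 1 base ref for the root vnode
 */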
/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
        struct vflush_info *info = data;
        struct vattr vattr;

        /*
         * Skip over vnodes marked VSYSTEM.
         */
        if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
                return (0);
        }

        /*
         * If WRITECLOSE is set, flush out unlinked but still open
         * files (even if open only for reading) and regular file
         * vnodes open for writing.
         */
        if ((info->flags & WRITECLOSE) &&
            (vp->v_type == VNON ||
            (VOP_GETATTR(vp, &vattr) == 0 &&
             vattr.va_nlink > 0)) &&
            (vp->v_writecount == 0 || vp->v_type != VREG)) {
                return (0);
        }

        /*
         * If we are the only holder (refcnt of 1) or the vnode is in
         * termination (refcnt < 0), we can vgone the vnode.
         */
        if (vp->v_sysref.refcnt <= 1) {
                vgone_vxlocked(vp);
                return (0);
        }

        /*
         * If FORCECLOSE is set, forcibly destroy the vnode and then move
         * it to a dummymount structure so vop_*() functions don't deref
         * a NULL pointer.
         */
        if (info->flags & FORCECLOSE) {
                vgone_vxlocked(vp);
                if (vp->v_mount == NULL)
                        insmntque(vp, &dummymount);
                return (0);
        }
#ifdef DIAGNOSTIC
        if (busyprt)
                vprint("vflush: busy vnode", vp);
#endif
        ++info->busy;
        return (0);
}
/*
 * Add/remove a global bio_ops vector.
 */
void
add_bio_ops(struct bio_ops *ops)
{
        TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
        TAILQ_REMOVE(&bio_ops_list, ops, entry);
}
/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
        struct bio_ops *ops;

        if (mp) {
                if ((ops = mp->mnt_bioops) != NULL)
                        ops->io_sync(mp);
        } else {
                TAILQ_FOREACH(ops, &bio_ops_list, entry) {
                        ops->io_sync(NULL);
                }
        }
}
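
/*
 * Example: a subsystem (e.g. a softdep-style layer) might hook in like
 * this (sketch; the ops structure and io_sync implementation shown are
 * hypothetical):
 *
 *	static struct bio_ops example_bioops = {
 *		.io_sync = example_io_sync,
 *	};
 *
 *	add_bio_ops(&example_bioops);	// at subsystem init
 *	...
 *	bio_ops_sync(NULL);	// general sync calls example_io_sync(NULL)
 */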
/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
        struct mount *mp = NULL;

        lwkt_gettoken(&mountlist_token);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (ncp == mp->mnt_ncmountpt.ncp)
                        break;
        }
        lwkt_reltoken(&mountlist_token);
        return (mp);
}