Use more specific privilege PRIV_VFS_GENERATION
/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.37 2008/09/17 21:44:18 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
struct mountscan_info {
        TAILQ_ENTRY(mountscan_info) msi_entry;
        int msi_how;
        struct mount *msi_node;
};

struct vmntvnodescan_info {
        TAILQ_ENTRY(vmntvnodescan_info) entry;
        struct vnode *vp;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
           &vnlru_nowhere, 0,
           "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;
static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
struct lwkt_token mntvnode_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);
/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
        lwkt_token_init(&mountlist_token);
        lwkt_token_init(&mntvnode_token);
        lwkt_token_init(&mntid_token);
        TAILQ_INIT(&mountscan_list);
        TAILQ_INIT(&mntvnodescan_list);
}
/*
 * Support function called with mntvnode_token held to remove a vnode
 * from the mountlist.  We must update any list scans which are in progress.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
        struct vmntvnodescan_info *info;

        TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
                if (info->vp == vp)
                        info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}
/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should setup the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
            struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        KKASSERT(mp != NULL);

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;

        /*
         * By default the vnode is assigned the mount point's normal
         * operations vector.
         */
        vp->v_ops = &mp->mnt_vn_use_ops;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}
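
#if 0
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a filesystem's inode-to-vnode path would typically call getnewvnode()
 * and then finish initializing the returned VX locked vnode.  The
 * "myfs_*" names and the use of VT_UFS as the tag are hypothetical.
 */
static int
myfs_get_vnode(struct mount *mp, struct myfs_inode *ip, struct vnode **vpp)
{
        struct vnode *vp;
        int error;

        error = getnewvnode(VT_UFS, mp, &vp, 0, 0);
        if (error)
                return (error);
        vp->v_data = ip;        /* caller sets up the remaining fields */
        vp->v_type = VREG;      /* leaving VNON keeps it untouchable */
        *vpp = vp;              /* returned still VX locked & refd */
        return (0);
}
#endif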
/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out.
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
                struct vop_ops **ops,
                struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;
        vp->v_ops = ops;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}
/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
        int lkflags;

        if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
                if (flags & LK_NOWAIT)
                        return (ENOENT);
                /* XXX not MP safe */
                mp->mnt_kern_flag |= MNTK_MWAIT;
                /*
                 * Since all busy locks are shared except the exclusive
                 * lock granted when unmounting, the only place that a
                 * wakeup needs to be done is at the release of the
                 * exclusive lock at the end of dounmount.
                 */
                tsleep((caddr_t)mp, 0, "vfs_busy", 0);
                return (ENOENT);
        }
        lkflags = LK_SHARED;
        if (lockmgr(&mp->mnt_lock, lkflags))
                panic("vfs_busy: unexpected lock failure");
        return (0);
}
/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp)
{
        lockmgr(&mp->mnt_lock, LK_RELEASE);
}
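
#if 0
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * the vfs_busy()/vfs_unbusy() pair interlocks an operation against a
 * concurrent unmount.  A shared hold is kept for the duration of the
 * work; with LK_NOWAIT, ENOENT is returned if an unmount is in progress.
 * The "example_*" name is hypothetical.
 */
static int
example_with_mount(struct mount *mp)
{
        int error;

        if ((error = vfs_busy(mp, LK_NOWAIT)) != 0)
                return (error);         /* unmount in progress, bail */
        /* ... safely operate on the mount point here ... */
        vfs_unbusy(mp);
        return (0);
}
#endif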
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
        struct vfsconf *vfsp;
        struct mount *mp;

        if (fstypename == NULL)
                return (ENODEV);
        for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
                if (!strcmp(vfsp->vfc_name, fstypename))
                        break;
        }
        if (vfsp == NULL)
                return (ENODEV);
        mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
        lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);
        vfs_busy(mp, LK_NOWAIT);
        TAILQ_INIT(&mp->mnt_nvnodelist);
        TAILQ_INIT(&mp->mnt_reservedvnlist);
        TAILQ_INIT(&mp->mnt_jlist);
        mp->mnt_nvnodelistsize = 0;
        mp->mnt_vfc = vfsp;
        mp->mnt_op = vfsp->vfc_vfsops;
        mp->mnt_flag = MNT_RDONLY;
        vfsp->vfc_refcount++;
        mp->mnt_iosize_max = DFLTPHYS;
        mp->mnt_stat.f_type = vfsp->vfc_typenum;
        mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
        strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
        copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
        *mpp = mp;
        return (0);
}
/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
        struct mount *mp;
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mountlist_token);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
                    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
                        break;
                }
        }
        lwkt_reltoken(&ilock);
        return (mp);
}
/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s
 * for the first 2^16 calls and unique val[0]'s mod 2^16 for the first
 * 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
        static u_int16_t mntid_base;
        lwkt_tokref ilock;
        fsid_t tfsid;
        int mtype;

        lwkt_gettoken(&ilock, &mntid_token);
        mtype = mp->mnt_vfc->vfc_typenum;
        tfsid.val[1] = mtype;
        mtype = (mtype & 0xFF) << 24;
        for (;;) {
                tfsid.val[0] = makeudev(255,
                    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
                mntid_base++;
                if (vfs_getvfs(&tfsid) == NULL)
                        break;
        }
        mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
        mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
        lwkt_reltoken(&ilock);
}
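
#if 0
/*
 * Worked illustration (editor's commentary, not part of the original
 * source): for filesystem type T (vfc_typenum) and counter N
 * (mntid_base), the loop above packs the device minor number as
 *
 *      minor  = ((T & 0xFF) << 24) | ((N & 0xFF00) << 8) | (N & 0xFF)
 *      val[0] = makeudev(255, minor)
 *
 * Bits 8-15 of the minor are always zero, so the low 16 bits of the
 * minor reduce to N & 0xFF; this is why val[0] stays unique mod 2^16
 * only for the first 2^8 calls, as the comment above notes.
 */
#endif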
/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
        int didmunge = 0;

        bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
        for (;;) {
                if (vfs_getvfs(template) == NULL)
                        break;
                didmunge = 1;
                ++template->val[1];
        }
        mp->mnt_stat.f_fsid = *template;
        return(didmunge);
}
/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count)
{
        if (vp->v_flag & VRECLAIMED)
                return (0);
#if 0
        if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
                return (0);
#endif
        if (sysref_isactive(&vp->v_sysref))
                return (0);
        if (vp->v_object && vp->v_object->resident_page_count >= page_count)
                return (0);
        return (1);
}
/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories) just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
        struct namecache *ncp;

        TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
                if (!TAILQ_EMPTY(&ncp->nc_list))
                        return(0);
        }
        return(1);
}
/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.  Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
        if (vp->v_flag & VRECLAIMED)
                return (0);
        if (vp->v_sysref.refcnt > 1)
                return (0);
        if (vp->v_object && vp->v_object->resident_page_count >= page_count)
                return (0);
        if (vp->v_auxrefs && visleaf(vp)) {
                vinvalbuf(vp, V_SAVE, 0, 0);
#if 0   /* DEBUG */
                kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
                        "vrecycle: vp %p succeeded: %s\n"), vp,
                        (TAILQ_FIRST(&vp->v_namecache) ?
                         TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
        }

        /*
         * This sequence may seem a little strange, but we need to optimize
         * the critical path a bit.  We can't recycle vnodes with other
         * references and because we are trying to recycle an otherwise
         * perfectly fine vnode we have to invalidate the namecache in a
         * way that avoids possible deadlocks (since the vnode lock is being
         * held here).  Finally, we have to check for other references one
         * last time in case something snuck in during the inval.
         */
        if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
                return (0);
        if (cache_inval_vp_nonblock(vp))
                return (0);
        return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}
/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
        struct vnode *vp;
        lwkt_tokref ilock;
        int done;
        int trigger;
        int usevnodes;
        int count;
        int trigger_mult = vnlru_nowhere;

        /*
         * Calculate the trigger point for the resident pages check.  The
         * minimum trigger value is approximately the number of pages in
         * the system divided by the number of vnodes.  However, due to
         * various other system memory overheads unrelated to data caching
         * it is a good idea to double the trigger (at least).
         *
         * trigger_mult starts at 0.  If the recycler is having problems
         * finding enough freeable vnodes it will increase trigger_mult.
         * This should not happen in normal operation, even on machines with
         * low amounts of memory, but extraordinary memory use by the system
         * versus the amount of cached data can trigger it.
         */
        usevnodes = desiredvnodes;
        if (usevnodes <= 0)
                usevnodes = 1;
        trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

        done = 0;
        lwkt_gettoken(&ilock, &mntvnode_token);
        count = mp->mnt_nvnodelistsize / 10 + 1;
        while (count && mp->mnt_syncer) {
                /*
                 * Next vnode.  Use the special syncer vnode to placemark
                 * the LRU.  This way the LRU code does not interfere with
                 * vmntvnodescan().
                 */
                vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
                TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
                if (vp) {
                        TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
                                           mp->mnt_syncer, v_nmntvnodes);
                } else {
                        TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
                                          v_nmntvnodes);
                        vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
                        if (vp == NULL)
                                break;
                }

                /*
                 * __VNODESCAN__
                 *
                 * The VP will stick around while we hold mntvnode_token,
                 * at least until we block, so we can safely do an initial
                 * check, and then must check again after we lock the vnode.
                 */
                if (vp->v_type == VNON ||       /* syncer or indeterminate */
                    !vmightfree(vp, trigger)    /* critical path opt */
                ) {
                        --count;
                        continue;
                }

                /*
                 * VX get the candidate vnode.  If the VX get fails the
                 * vnode might still be on the mountlist.  Our loop depends
                 * on us at least cycling the vnode to the end of the
                 * mountlist.
                 */
                if (vx_get_nonblock(vp) != 0) {
                        --count;
                        continue;
                }

                /*
                 * Since we blocked locking the vp, make sure it is still
                 * a candidate for reclamation.  That is, it has not already
                 * been reclaimed and only has our VX reference associated
                 * with it.
                 */
                if (vp->v_type == VNON ||       /* syncer or indeterminate */
                    (vp->v_flag & VRECLAIMED) ||
                    vp->v_mount != mp ||
                    !vtrytomakegoneable(vp, trigger)    /* critical path opt */
                ) {
                        --count;
                        vx_put(vp);
                        continue;
                }

                /*
                 * All right, we are good, move the vp to the end of the
                 * mountlist and clean it out.  The vget will have returned
                 * an error if the vnode was destroyed (VRECLAIMED set), so we
                 * do not have to check again.  The vput() will move the
                 * vnode to the free list if the vgone() was successful.
                 */
                KKASSERT(vp->v_mount == mp);
                vgone_vxlocked(vp);
                vx_put(vp);
                ++done;
                --count;
        }
        lwkt_reltoken(&ilock);
        return (done);
}
/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
        if (vnlruproc_sig == 0) {
                vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
                wakeup(vnlruthread);
        }
        tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}

static void
vnlru_proc(void)
{
        struct thread *td = curthread;
        int done;

        EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
                              SHUTDOWN_PRI_FIRST);

        crit_enter();
        for (;;) {
                kproc_suspend_loop();

                /*
                 * Try to free some vnodes if we have too many
                 */
                if (numvnodes > desiredvnodes &&
                    freevnodes > desiredvnodes * 2 / 10) {
                        int count = numvnodes - desiredvnodes;

                        if (count > freevnodes / 100)
                                count = freevnodes / 100;
                        if (count < 5)
                                count = 5;
                        freesomevnodes(count);
                }

                /*
                 * Nothing to do if most of our vnodes are already on
                 * the free list.
                 */
                if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
                        vnlruproc_sig = 0;
                        wakeup(&vnlruproc_sig);
                        tsleep(td, 0, "vlruwt", hz);
                        continue;
                }
                cache_cleanneg(0);
                done = mountlist_scan(vlrureclaim, NULL, MNTSCAN_FORWARD);

                /*
                 * The vlrureclaim() call only processes 1/10 of the vnodes
                 * on each mount.  If we couldn't find any repeat the loop
                 * at least enough times to cover all available vnodes before
                 * we start sleeping.  Complain if the failure extends past
                 * 30 seconds, every 30 seconds.
                 */
                if (done == 0) {
                        ++vnlru_nowhere;
                        if (vnlru_nowhere % 10 == 0)
                                tsleep(td, 0, "vlrup", hz * 3);
                        if (vnlru_nowhere % 100 == 0)
                                kprintf("vnlru_proc: vnode recycler stopped working!\n");
                        if (vnlru_nowhere == 1000)
                                vnlru_nowhere = 900;
                } else {
                        vnlru_nowhere = 0;
                }
        }
        crit_exit();
}
/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mountlist_token);
        if (how == MNTINS_FIRST)
                TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
        else
                TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
        lwkt_reltoken(&ilock);
}
/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
        lwkt_tokref ilock;
        int error;

        lwkt_gettoken(&ilock, &mountlist_token);
        error = callback(mp);
        lwkt_reltoken(&ilock);
        return (error);
}
/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
        return(TAILQ_FIRST(&mountlist));
}
/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
        struct mountscan_info *msi;
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mountlist_token);
        TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
                if (msi->msi_node == mp) {
                        if (msi->msi_how & MNTSCAN_FORWARD)
                                msi->msi_node = TAILQ_NEXT(mp, mnt_list);
                        else
                                msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
                }
        }
        TAILQ_REMOVE(&mountlist, mp, mnt_list);
        lwkt_reltoken(&ilock);
}
/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
        struct mountscan_info info;
        lwkt_tokref ilock;
        struct mount *mp;
        thread_t td;
        int count;
        int res;

        lwkt_gettoken(&ilock, &mountlist_token);

        info.msi_how = how;
        info.msi_node = NULL;   /* paranoia */
        TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

        res = 0;
        td = curthread;

        if (how & MNTSCAN_FORWARD) {
                info.msi_node = TAILQ_FIRST(&mountlist);
                while ((mp = info.msi_node) != NULL) {
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_NEXT(mp, mnt_list);
                }
        } else if (how & MNTSCAN_REVERSE) {
                info.msi_node = TAILQ_LAST(&mountlist, mntlist);
                while ((mp = info.msi_node) != NULL) {
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
                }
        }
        TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
        lwkt_reltoken(&ilock);
        return(res);
}
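
#if 0
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a minimal callback suitable for mountlist_scan().  Each invocation is
 * made with the mount point busied (no MNTSCAN_NOBUSY here), and the
 * non-negative return values are aggregated into the scan's total.
 * The "example_*" name is hypothetical.
 */
static int
example_count_rdonly(struct mount *mp, void *data)
{
        return ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
}

/* usage: nrdonly = mountlist_scan(example_count_rdonly, NULL, MNTSCAN_FORWARD); */
#endif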
/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
        "vnlru",
        vnlru_proc,
        &vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)
/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mntvnode_token);
        /*
         * Delete from old mount point vnode list, if on one.
         */
        if (vp->v_mount != NULL) {
                KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
                        ("bad mount point vnode list size"));
                vremovevnodemnt(vp);
                vp->v_mount->mnt_nvnodelistsize--;
        }
        /*
         * Insert into list of vnodes for the new mount point, if available.
         * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
         */
        if ((vp->v_mount = mp) == NULL) {
                lwkt_reltoken(&ilock);
                return;
        }
        if (mp->mnt_syncer) {
                TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
        } else {
                TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
        }
        mp->mnt_nvnodelistsize++;
        lwkt_reltoken(&ilock);
}
/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
        struct vmntvnodescan_info info;
        lwkt_tokref ilock;
        struct vnode *vp;
        int r = 0;
        int maxcount = 1000000;
        int stopcount = 0;
        int count = 0;

        lwkt_gettoken(&ilock, &mntvnode_token);

        /*
         * If asked to do one pass stop after iterating available vnodes.
         * Under heavy loads new vnodes can be added while we are scanning,
         * so this isn't perfect.  Create a slop factor of 2x.
         */
        if (flags & VMSC_ONEPASS)
                stopcount = mp->mnt_nvnodelistsize * 2;

        info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
        TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
        while ((vp = info.vp) != NULL) {
                if (--maxcount == 0)
                        panic("maxcount reached during vmntvnodescan");

                /*
                 * Skip if visible but not ready, or special (e.g.
                 * mp->mnt_syncer)
                 */
                if (vp->v_type == VNON)
                        goto next;
                KKASSERT(vp->v_mount == mp);

                /*
                 * Quick test.  A negative return continues the loop without
                 * calling the slow test.  0 continues onto the slow test.
                 * A positive number aborts the loop.
                 */
                if (fastfunc) {
                        if ((r = fastfunc(mp, vp, data)) < 0) {
                                r = 0;
                                goto next;
                        }
                        if (r)
                                break;
                }

                /*
                 * Get a vxlock on the vnode, retry if it has moved or isn't
                 * in the mountlist where we expect it.
                 */
                if (slowfunc) {
                        int error;

                        switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
                        case VMSC_GETVP:
                                error = vget(vp, LK_EXCLUSIVE);
                                break;
                        case VMSC_GETVP|VMSC_NOWAIT:
                                error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
                                break;
                        case VMSC_GETVX:
                                vx_get(vp);
                                error = 0;
                                break;
                        default:
                                error = 0;
                                break;
                        }
                        if (error)
                                goto next;
                        /*
                         * Do not call the slow function if the vnode is
                         * invalid or if it was ripped out from under us
                         * while we (potentially) blocked.
                         */
                        if (info.vp == vp && vp->v_type != VNON)
                                r = slowfunc(mp, vp, data);

                        /*
                         * Cleanup
                         */
                        switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
                        case VMSC_GETVP:
                        case VMSC_GETVP|VMSC_NOWAIT:
                                vput(vp);
                                break;
                        case VMSC_GETVX:
                                vx_put(vp);
                                break;
                        default:
                                break;
                        }
                        if (r != 0)
                                break;
                }

next:
                /*
                 * Yield after some processing.  Depending on the number
                 * of vnodes, we might wind up running for a long time.
                 * Because threads are not preemptable, time critical
                 * userland processes might starve.  Give them a chance
                 * now and then.
                 */
                if (++count == 10000) {
                        /* We really want to yield a bit, so we simply sleep a tick */
                        tsleep(mp, 0, "vnodescn", 1);
                        count = 0;
                }

                /*
                 * If doing one pass this decrements to zero.  If it starts
                 * at zero it is effectively unlimited for the purposes of
                 * this loop.
                 */
                if (--stopcount == 0)
                        break;

                /*
                 * Iterate.  If the vnode was ripped out from under us
                 * info.vp will already point to the next vnode, otherwise
                 * we have to obtain the next valid vnode ourselves.
                 */
                if (info.vp == vp)
                        info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
        lwkt_reltoken(&ilock);
        return(r);
}
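
#if 0
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a typical vmntvnodescan() consumer supplies a non-blocking fastfunc
 * to filter vnodes cheaply and a slowfunc that runs with the vnode held
 * per the VMSC_* flags.  The "example_*" names are hypothetical.
 */
static int
example_fastfunc(struct mount *mp, struct vnode *vp, void *data)
{
        if (vp->v_type != VREG)
                return (-1);    /* < 0: skip without calling slowfunc */
        return (0);             /* 0: continue on to the locked slowfunc */
}

static int
example_slowfunc(struct mount *mp, struct vnode *vp, void *data)
{
        /* vnode was vget()'d for us here because of VMSC_GETVP */
        return (0);             /* non-zero would terminate the scan */
}

/* usage: vmntvnodescan(mp, VMSC_GETVP, example_fastfunc, example_slowfunc, NULL); */
#endif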
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;         /* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
        int flags;
        int busy;
        thread_t td;
};
int
vflush(struct mount *mp, int rootrefs, int flags)
{
        struct thread *td = curthread;  /* XXX */
        struct vnode *rootvp = NULL;
        int error;
        struct vflush_info vflush_info;

        if (rootrefs > 0) {
                KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
                        ("vflush: bad args"));
                /*
                 * Get the filesystem root vnode.  We can vput() it
                 * immediately, since with rootrefs > 0, it won't go away.
                 */
                if ((error = VFS_ROOT(mp, &rootvp)) != 0)
                        return (error);
                vput(rootvp);
        }

        vflush_info.busy = 0;
        vflush_info.flags = flags;
        vflush_info.td = td;
        vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

        if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
                /*
                 * If just the root vnode is busy, and if its refcount
                 * is equal to `rootrefs', then go ahead and kill it.
                 */
                KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
                KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
                if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
                        vx_lock(rootvp);
                        vgone_vxlocked(rootvp);
                        vx_unlock(rootvp);
                        vflush_info.busy = 0;
                }
        }
        if (vflush_info.busy)
                return (EBUSY);
        for (; rootrefs > 0; rootrefs--)
                vrele(rootvp);
        return (0);
}
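
#if 0
/*
 * Illustrative sketch (editor's example, not part of the original file):
 * a filesystem's unmount code commonly calls vflush() with rootrefs == 1,
 * accounting for the reference held on the root vnode, and adds
 * FORCECLOSE when the user requested a forced unmount.  The
 * "example_unmount" helper is hypothetical.
 */
static int
example_unmount(struct mount *mp, int mntflags)
{
        int flags = 0;

        if (mntflags & MNT_FORCE)
                flags |= FORCECLOSE;
        return (vflush(mp, 1, flags));
}
#endif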
/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
        struct vflush_info *info = data;
        struct vattr vattr;

        /*
         * Skip over vnodes marked VSYSTEM.
         */
        if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
                return(0);
        }

        /*
         * If WRITECLOSE is set, flush out unlinked but still open
         * files (even if open only for reading) and regular file
         * vnodes open for writing.
         */
        if ((info->flags & WRITECLOSE) &&
            (vp->v_type == VNON ||
            (VOP_GETATTR(vp, &vattr) == 0 &&
             vattr.va_nlink > 0)) &&
            (vp->v_writecount == 0 || vp->v_type != VREG)) {
                return(0);
        }

        /*
         * If we are the only holder (refcnt of 1) or the vnode is in
         * termination (refcnt < 0), we can vgone the vnode.
         */
        if (vp->v_sysref.refcnt <= 1) {
                vgone_vxlocked(vp);
                return(0);
        }

        /*
         * If FORCECLOSE is set, forcibly close the vnode.  For block
         * or character devices, revert to an anonymous device.  For
         * all other files, just kill them.
         */
        if (info->flags & FORCECLOSE) {
                if (vp->v_type != VBLK && vp->v_type != VCHR) {
                        vgone_vxlocked(vp);
                } else {
                        vclean_vxlocked(vp, 0);
                        vp->v_ops = &spec_vnode_vops_p;
                        insmntque(vp, NULL);
                }
                return(0);
        }
#ifdef DIAGNOSTIC
        if (busyprt)
                vprint("vflush: busy vnode", vp);
#endif
        ++info->busy;
        return(0);
}
void
add_bio_ops(struct bio_ops *ops)
{
        TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
        TAILQ_REMOVE(&bio_ops_list, ops, entry);
}

/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
        struct bio_ops *ops;

        if (mp) {
                if ((ops = mp->mnt_bioops) != NULL)
                        ops->io_sync(mp);
        } else {
                TAILQ_FOREACH(ops, &bio_ops_list, entry) {
                        ops->io_sync(NULL);
                }
        }
}