sys/kern/vfs_mount.c
/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
struct mountscan_info {
	TAILQ_ENTRY(mountscan_info) msi_entry;
	int msi_how;
	struct mount *msi_node;
};

struct vmntvnodescan_info {
	TAILQ_ENTRY(vmntvnodescan_info) entry;
	struct vnode *vp;
};

struct vnlru_info {
	int	pass;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
	   &vnlru_nowhere, 0,
	   "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);
/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
	lwkt_token_init(&mountlist_token, "mntlist");
	lwkt_token_init(&mntid_token, "mntid");
	TAILQ_INIT(&mountscan_list);
	mount_init(&dummymount);
	dummymount.mnt_flag |= MNT_RDONLY;
	dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}
/*
 * Support function called to remove a vnode from the mountlist and
 * deal with side effects for scans in progress.
 *
 * Target mnt_token is held on call.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
	struct vmntvnodescan_info *info;
	struct mount *mp = vp->v_mount;

	TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) {
		if (info->vp == vp)
			info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}
	TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}
/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should set up the
 * remaining fields and vx_put() or, if it wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
	    struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	KKASSERT(mp != NULL);

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;

	/*
	 * By default the vnode is assigned the mount point's normal
	 * operations vector.
	 */
	vp->v_ops = &mp->mnt_vn_use_ops;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
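
/*
 * Example (illustrative sketch, not part of the original file): a
 * hypothetical filesystem's inode-to-vnode path might use getnewvnode()
 * roughly as below.  The names myfs_alloc_vnode and struct myfs_inode
 * are assumptions, as are the lktimeout/lkflags values of 0.
 */
#if 0
static int
myfs_alloc_vnode(struct mount *mp, struct myfs_inode *ip, struct vnode **vpp)
{
	struct vnode *vp;
	int error;

	error = getnewvnode(VT_UFS, mp, &vp, 0, 0);	/* tag is illustrative */
	if (error)
		return (error);
	vp->v_data = ip;	/* caller sets up the remaining fields */
	vp->v_type = VREG;
	*vpp = vp;		/* returned VX locked and ref'd */
	return (0);
}
#endif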
/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
		struct vop_ops **ops,
		struct vnode **vpp, int lktimeout, int lkflags)
{
	struct vnode *vp;

	vp = allocvnode(lktimeout, lkflags);
	vp->v_tag = tag;
	vp->v_data = NULL;
	vp->v_ops = ops;

	if (mp == NULL)
		mp = &dummymount;

	/*
	 * Placing the vnode on the mount point's queue makes it visible.
	 * VNON prevents it from being messed with, however.
	 */
	insmntque(vp, mp);

	/*
	 * A VX locked & refd vnode is returned.
	 */
	*vpp = vp;
	return (0);
}
/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
	int lkflags;

	atomic_add_int(&mp->mnt_refs, 1);
	lwkt_gettoken(&mp->mnt_token);
	if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
		if (flags & LK_NOWAIT) {
			lwkt_reltoken(&mp->mnt_token);
			atomic_add_int(&mp->mnt_refs, -1);
			return (ENOENT);
		}
		/* XXX not MP safe */
		mp->mnt_kern_flag |= MNTK_MWAIT;

		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		tsleep((caddr_t)mp, 0, "vfs_busy", 0);
		lwkt_reltoken(&mp->mnt_token);
		atomic_add_int(&mp->mnt_refs, -1);
		return (ENOENT);
	}
	lkflags = LK_SHARED;
	if (lockmgr(&mp->mnt_lock, lkflags))
		panic("vfs_busy: unexpected lock failure");
	lwkt_reltoken(&mp->mnt_token);
	return (0);
}
/*
 * Free a busy filesystem.
 *
 * Decrement refs before releasing the lock so e.g. a pending umount
 * doesn't give us an unexpected busy error.
 */
void
vfs_unbusy(struct mount *mp)
{
	atomic_add_int(&mp->mnt_refs, -1);
	lockmgr(&mp->mnt_lock, LK_RELEASE);
}
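
/*
 * Example (illustrative sketch, not part of the original file): the
 * typical vfs_busy()/vfs_unbusy() bracketing used by code that must
 * prevent an unmount while it works on a mount point.  The helper name
 * is hypothetical.
 */
#if 0
static int
example_touch_mount(struct mount *mp)
{
	/* fail immediately instead of sleeping if an unmount races us */
	if (vfs_busy(mp, LK_NOWAIT) != 0)
		return (ENOENT);
	/* ... mp cannot be unmounted while the shared busy lock is held ... */
	vfs_unbusy(mp);
	return (0);
}
#endif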
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
	struct vfsconf *vfsp;
	struct mount *mp;

	if (fstypename == NULL)
		return (ENODEV);

	vfsp = vfsconf_find_by_name(fstypename);
	if (vfsp == NULL)
		return (ENODEV);
	mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
	mount_init(mp);
	lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

	vfs_busy(mp, 0);
	mp->mnt_vfc = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	vfsp->vfc_refcount++;
	mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= MNT_RDONLY;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
	strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
	copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
	*mpp = mp;
	return (0);
}
/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
	lockinit(&mp->mnt_lock, "vfslock", hz*5, 0);
	lwkt_token_init(&mp->mnt_token, "permnt");

	TAILQ_INIT(&mp->mnt_vnodescan_list);
	TAILQ_INIT(&mp->mnt_nvnodelist);
	TAILQ_INIT(&mp->mnt_reservedvnlist);
	TAILQ_INIT(&mp->mnt_jlist);
	mp->mnt_nvnodelistsize = 0;
	mp->mnt_flag = 0;
	mp->mnt_iosize_max = MAXPHYS;
}
/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
	struct mount *mp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
		    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}
/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
	static u_int16_t mntid_base;
	fsid_t tfsid;
	int mtype;

	lwkt_gettoken(&mntid_token);
	mtype = mp->mnt_vfc->vfc_typenum;
	tfsid.val[1] = mtype;
	mtype = (mtype & 0xFF) << 24;
	for (;;) {
		tfsid.val[0] = makeudev(255,
		    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
		mntid_base++;
		if (vfs_getvfs(&tfsid) == NULL)
			break;
	}
	mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
	mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
	lwkt_reltoken(&mntid_token);
}
/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
	int didmunge = 0;

	bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));
	for (;;) {
		if (vfs_getvfs(template) == NULL)
			break;
		didmunge = 1;
		++template->val[1];
	}
	mp->mnt_stat.f_fsid = *template;
	return(didmunge);
}
/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */
/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count, int pass)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
#if 0
	if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
		return (0);
#endif
	if (sysref_isactive(&vp->v_sysref))
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);

	/*
	 * XXX horrible hack.  Up to four passes will be taken.  Each pass
	 * makes a larger set of vnodes eligible.  For now what this really
	 * means is that we try to recycle files opened only once before
	 * recycling files opened multiple times.
	 */
	switch(vp->v_flag & (VAGE0 | VAGE1)) {
	case 0:
		if (pass < 3)
			return(0);
		break;
	case VAGE0:
		if (pass < 2)
			return(0);
		break;
	case VAGE1:
		if (pass < 1)
			return(0);
		break;
	case VAGE0 | VAGE1:
		break;
	}
	return (1);
}
/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_auxrefs may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_auxrefs must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_auxrefs != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories), just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
	struct namecache *ncp;

	spin_lock(&vp->v_spin);
	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (!TAILQ_EMPTY(&ncp->nc_list)) {
			spin_unlock(&vp->v_spin);
			return(0);
		}
	}
	spin_unlock(&vp->v_spin);
	return(1);
}
/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.  Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
	if (vp->v_flag & VRECLAIMED)
		return (0);
	if (vp->v_sysref.refcnt > 1)
		return (0);
	if (vp->v_object && vp->v_object->resident_page_count >= page_count)
		return (0);
	if (vp->v_auxrefs && visleaf(vp)) {
		vinvalbuf(vp, V_SAVE, 0, 0);
#if 0	/* DEBUG */
		kprintf((vp->v_auxrefs ? "vrecycle: vp %p failed: %s\n" :
			"vrecycle: vp %p succeeded: %s\n"), vp,
			(TAILQ_FIRST(&vp->v_namecache) ?
			    TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
	}

	/*
	 * This sequence may seem a little strange, but we need to optimize
	 * the critical path a bit.  We can't recycle vnodes with other
	 * references and because we are trying to recycle an otherwise
	 * perfectly fine vnode we have to invalidate the namecache in a
	 * way that avoids possible deadlocks (since the vnode lock is being
	 * held here).  Finally, we have to check for other references one
	 * last time in case something snuck in during the inval.
	 */
	if (vp->v_sysref.refcnt > 1 || vp->v_auxrefs != 0)
		return (0);
	if (cache_inval_vp_nonblock(vp))
		return (0);
	return (vp->v_sysref.refcnt <= 1 && vp->v_auxrefs == 0);
}
/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 *
 * NOTE: The 1/10 reclamation also ensures that the inactive data set
 *	 (the vnodes being recycled by the one-time use) does not degenerate
 *	 into too-small a set.  This is important because once a vnode is
 *	 marked as not being one-time-use (VAGE0/VAGE1 both 0) that vnode
 *	 will not be destroyed EXCEPT by this mechanism.  VM pages can still
 *	 be cleaned/freed by the pageout daemon.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
	struct vnlru_info *info = data;
	struct vnode *vp;
	int done;
	int trigger;
	int usevnodes;
	int count;
	int trigger_mult = vnlru_nowhere;

	/*
	 * Calculate the trigger point for the resident pages check.  The
	 * minimum trigger value is approximately the number of pages in
	 * the system divided by the number of vnodes.  However, due to
	 * various other system memory overheads unrelated to data caching
	 * it is a good idea to double the trigger (at least).
	 *
	 * trigger_mult starts at 0.  If the recycler is having problems
	 * finding enough freeable vnodes it will increase trigger_mult.
	 * This should not happen in normal operation, even on machines with
	 * low amounts of memory, but extraordinary memory use by the system
	 * versus the amount of cached data can trigger it.
	 *
	 * (long) -> deal with 64 bit machines, intermediate overflow
	 */
	usevnodes = desiredvnodes;
	if (usevnodes <= 0)
		usevnodes = 1;
	trigger = (long)vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

	done = 0;
	lwkt_gettoken(&mp->mnt_token);
	count = mp->mnt_nvnodelistsize / 10 + 1;

	while (count && mp->mnt_syncer) {
		/*
		 * Next vnode.  Use the special syncer vnode to placemark
		 * the LRU.  This way the LRU code does not interfere with
		 * vmntvnodescan().
		 */
		vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
		TAILQ_REMOVE(&mp->mnt_nvnodelist, mp->mnt_syncer, v_nmntvnodes);
		if (vp) {
			TAILQ_INSERT_AFTER(&mp->mnt_nvnodelist, vp,
					   mp->mnt_syncer, v_nmntvnodes);
		} else {
			TAILQ_INSERT_HEAD(&mp->mnt_nvnodelist, mp->mnt_syncer,
					  v_nmntvnodes);
			vp = TAILQ_NEXT(mp->mnt_syncer, v_nmntvnodes);
			if (vp == NULL)
				break;
		}

		/*
		 * __VNODESCAN__
		 *
		 * The VP will stick around while we hold mnt_token,
		 * at least until we block, so we can safely do an initial
		 * check, and then must check again after we lock the vnode.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    !vmightfree(vp, trigger, info->pass) /* critical path opt */
		) {
			--count;
			continue;
		}

		/*
		 * VX get the candidate vnode.  If the VX get fails the
		 * vnode might still be on the mountlist.  Our loop depends
		 * on us at least cycling the vnode to the end of the
		 * mountlist.
		 */
		if (vx_get_nonblock(vp) != 0) {
			--count;
			continue;
		}

		/*
		 * Since we blocked locking the vp, make sure it is still
		 * a candidate for reclamation.  That is, it has not already
		 * been reclaimed and only has our VX reference associated
		 * with it.
		 */
		if (vp->v_type == VNON ||	/* syncer or indeterminate */
		    (vp->v_flag & VRECLAIMED) ||
		    vp->v_mount != mp ||
		    !vtrytomakegoneable(vp, trigger)	/* critical path opt */
		) {
			--count;
			vx_put(vp);
			continue;
		}

		/*
		 * All right, we are good, move the vp to the end of the
		 * mountlist and clean it out.  The vget will have returned
		 * an error if the vnode was destroyed (VRECLAIMED set), so we
		 * do not have to check again.  The vput() will move the
		 * vnode to the free list if the vgone() was successful.
		 */
		KKASSERT(vp->v_mount == mp);
		vgone_vxlocked(vp);
		vx_put(vp);
		++done;
		--count;
	}
	lwkt_reltoken(&mp->mnt_token);
	return (done);
}
/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;

static void
vnlru_proc(void)
{
	struct thread *td = curthread;
	struct vnlru_info info;
	int done;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
			      SHUTDOWN_PRI_FIRST);

	for (;;) {
		kproc_suspend_loop();

		/*
		 * Do some opportunistic roving.
		 */
		if (numvnodes > 100000)
			vnode_free_rover_scan(50);
		else if (numvnodes > 10000)
			vnode_free_rover_scan(20);
		else
			vnode_free_rover_scan(5);

		/*
		 * Try to free some vnodes if we have too many
		 *
		 * (long) -> deal with 64 bit machines, intermediate overflow
		 */
		if (numvnodes > desiredvnodes &&
		    freevnodes > desiredvnodes * 2 / 10) {
			int count = numvnodes - desiredvnodes;

			if (count > freevnodes / 100)
				count = freevnodes / 100;
			if (count < 5)
				count = 5;
			freesomevnodes(count);
		}

		/*
		 * Do non-critical-path (more robust) cache cleaning,
		 * even if vnode counts are nominal, to try to avoid
		 * having to do it in the critical path.
		 */
		cache_hysteresis(0);

		/*
		 * Nothing to do if most of our vnodes are already on
		 * the free list.
		 */
		if (numvnodes - freevnodes <= (long)desiredvnodes * 9 / 10) {
			tsleep(vnlruthread, 0, "vlruwt", hz);
			continue;
		}

		/*
		 * The pass iterates through the four combinations of
		 * VAGE0/VAGE1.  We want to get rid of aged small files
		 * first.
		 */
		info.pass = 0;
		done = 0;
		while (done == 0 && info.pass < 4) {
			done = mountlist_scan(vlrureclaim, &info,
					      MNTSCAN_FORWARD);
			++info.pass;
		}

		/*
		 * The vlrureclaim() call only processes 1/10 of the vnodes
		 * on each mount.  If we couldn't find any repeat the loop
		 * at least enough times to cover all available vnodes before
		 * we start sleeping.  Complain if the failure extends past
		 * 30 seconds, every 30 seconds.
		 */
		if (done == 0) {
			++vnlru_nowhere;
			if (vnlru_nowhere % 10 == 0)
				tsleep(vnlruthread, 0, "vlrup", hz * 3);
			if (vnlru_nowhere % 100 == 0)
				kprintf("vnlru_proc: vnode recycler stopped working!\n");
			if (vnlru_nowhere == 1000)
				vnlru_nowhere = 900;
		} else {
			vnlru_nowhere = 0;
		}
	}
}
/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
	lwkt_gettoken(&mountlist_token);
	if (how == MNTINS_FIRST)
		TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
	else
		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}
/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
	int error;

	lwkt_gettoken(&mountlist_token);
	error = callback(mp);
	lwkt_reltoken(&mountlist_token);
	return (error);
}
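
/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * interlock callback.  It runs with mountlist_token held, serialized
 * against other users of this mechanism.  The function name is
 * hypothetical; usage would be mountlist_interlock(example_interlock_cb, mp).
 */
#if 0
static int
example_interlock_cb(struct mount *mp)
{
	/* mp cannot be removed from the mountlist while we run */
	if (mp->mnt_flag & MNT_RDONLY)
		return (EROFS);
	return (0);
}
#endif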
/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
	return(TAILQ_FIRST(&mountlist));
}
/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
	struct mountscan_info *msi;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
		if (msi->msi_node == mp) {
			if (msi->msi_how & MNTSCAN_FORWARD)
				msi->msi_node = TAILQ_NEXT(mp, mnt_list);
			else
				msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountlist, mp, mnt_list);
	lwkt_reltoken(&mountlist_token);
}
/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.
 * This function is mainly used by VFS quota code to check if a
 * cached nullfs struct mount pointer is still valid at use time.
 *
 * FIXME: there is no guarantee the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
	int node_exists = 0;
	struct mount *lmp;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
		if (lmp == mp) {
			node_exists = 1;
			break;
		}
	}
	lwkt_reltoken(&mountlist_token);
	return(node_exists);
}
/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
	struct mountscan_info info;
	struct mount *mp;
	int count;
	int res;

	lwkt_gettoken(&mountlist_token);

	info.msi_how = how;
	info.msi_node = NULL;	/* paranoia */
	TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

	res = 0;

	if (how & MNTSCAN_FORWARD) {
		info.msi_node = TAILQ_FIRST(&mountlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_NEXT(mp, mnt_list);
		}
	} else if (how & MNTSCAN_REVERSE) {
		info.msi_node = TAILQ_LAST(&mountlist, mntlist);
		while ((mp = info.msi_node) != NULL) {
			if (how & MNTSCAN_NOBUSY) {
				count = callback(mp, data);
			} else if (vfs_busy(mp, LK_NOWAIT) == 0) {
				count = callback(mp, data);
				if (mp == info.msi_node)
					vfs_unbusy(mp);
			} else {
				count = 0;
			}
			if (count < 0)
				break;
			res += count;
			if (mp == info.msi_node)
				info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
		}
	}
	TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
	lwkt_reltoken(&mountlist_token);
	return(res);
}
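
/*
 * Example (illustrative sketch, not part of the original file): counting
 * read-only mounts with mountlist_scan().  Each mount is busied around
 * the callback and non-negative return values aggregate into the scan
 * total.  The callback name is hypothetical.
 */
#if 0
static int
example_count_ro(struct mount *mp, void *data)
{
	return ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
}

/* usage: n = mountlist_scan(example_count_ro, NULL, MNTSCAN_FORWARD); */
#endif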
/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
	"vnlru",
	vnlru_proc,
	&vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp);
/*
 * Move a vnode from one mount queue to another.
 *
 * MPSAFE
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
	struct mount *omp;

	/*
	 * Delete from old mount point vnode list, if on one.
	 */
	if ((omp = vp->v_mount) != NULL) {
		lwkt_gettoken(&omp->mnt_token);
		KKASSERT(omp == vp->v_mount);
		KASSERT(omp->mnt_nvnodelistsize > 0,
			("bad mount point vnode list size"));
		vremovevnodemnt(vp);
		omp->mnt_nvnodelistsize--;
		lwkt_reltoken(&omp->mnt_token);
	}

	/*
	 * Insert into list of vnodes for the new mount point, if available.
	 * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
	 */
	if (mp == NULL) {
		vp->v_mount = NULL;
		return;
	}
	lwkt_gettoken(&mp->mnt_token);
	vp->v_mount = mp;
	if (mp->mnt_syncer) {
		TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
	} else {
		TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
	}
	mp->mnt_nvnodelistsize++;
	lwkt_reltoken(&mp->mnt_token);
}
/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 *
 * NOTE: We hold vmobj_token to prevent a VM object from being destroyed
 *	 out from under the fastfunc()'s vnode test.  It will not prevent
 *	 v_object from getting NULL'd out but it will ensure that the
 *	 pointer (if we race) will remain stable.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
	struct vmntvnodescan_info info;
	struct vnode *vp;
	int r = 0;
	int maxcount = mp->mnt_nvnodelistsize * 2;
	int stopcount = 0;
	int count = 0;

	lwkt_gettoken(&mp->mnt_token);
	lwkt_gettoken(&vmobj_token);

	/*
	 * If asked to do one pass stop after iterating available vnodes.
	 * Under heavy loads new vnodes can be added while we are scanning,
	 * so this isn't perfect.  Create a slop factor of 2x.
	 */
	if (flags & VMSC_ONEPASS)
		stopcount = mp->mnt_nvnodelistsize;

	info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
	TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry);

	while ((vp = info.vp) != NULL) {
		if (--maxcount == 0) {
			kprintf("Warning: excessive fssync iteration\n");
			maxcount = mp->mnt_nvnodelistsize * 2;
		}

		/*
		 * Skip if visible but not ready, or special (e.g.
		 * mp->mnt_syncer)
		 */
		if (vp->v_type == VNON)
			goto next;
		KKASSERT(vp->v_mount == mp);

		/*
		 * Quick test.  A negative return continues the loop without
		 * calling the slow test.  0 continues onto the slow test.
		 * A positive number aborts the loop.
		 */
		if (fastfunc) {
			if ((r = fastfunc(mp, vp, data)) < 0) {
				r = 0;
				goto next;
			}
			if (r)
				break;
		}

		/*
		 * Get a vxlock on the vnode, retry if it has moved or isn't
		 * in the mountlist where we expect it.
		 */
		if (slowfunc) {
			int error;

			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
				error = vget(vp, LK_EXCLUSIVE);
				break;
			case VMSC_GETVP|VMSC_NOWAIT:
				error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
				break;
			case VMSC_GETVX:
				vx_get(vp);
				error = 0;
				break;
			default:
				error = 0;
				break;
			}
			if (error)
				goto next;

			/*
			 * Do not call the slow function if the vnode is
			 * invalid or if it was ripped out from under us
			 * while we (potentially) blocked.
			 */
			if (info.vp == vp && vp->v_type != VNON)
				r = slowfunc(mp, vp, data);

			/*
			 * Cleanup
			 */
			switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
			case VMSC_GETVP:
			case VMSC_GETVP|VMSC_NOWAIT:
				vput(vp);
				break;
			case VMSC_GETVX:
				vx_put(vp);
				break;
			default:
				break;
			}
			if (r != 0)
				break;
		}

next:
		/*
		 * Yield after some processing.  Depending on the number
		 * of vnodes, we might wind up running for a long time.
		 * Because threads are not preemptable, time critical
		 * userland processes might starve.  Give them a chance
		 * now and then.
		 */
		if (++count == 10000) {
			/* We really want to yield a bit, so we simply sleep a tick */
			tsleep(mp, 0, "vnodescn", 1);
			count = 0;
		}

		/*
		 * If doing one pass this decrements to zero.  If it starts
		 * at zero it is effectively unlimited for the purposes of
		 * this loop.
		 */
		if (--stopcount == 0)
			break;

		/*
		 * Iterate.  If the vnode was ripped out from under us
		 * info.vp will already point to the next vnode, otherwise
		 * we have to obtain the next valid vnode ourselves.
		 */
		if (info.vp == vp)
			info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
	}

	TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry);
	lwkt_reltoken(&vmobj_token);
	lwkt_reltoken(&mp->mnt_token);
	return(r);
}
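
/*
 * Example (illustrative sketch, not part of the original file): a
 * fast/slow callback pair for vmntvnodescan().  The fast callback filters
 * without blocking or locking the vnode (return < 0 to skip, 0 to fall
 * through to the slow callback); the slow callback runs with the vnode
 * held per the VMSC_* flags.  The names are hypothetical.
 */
#if 0
static int
example_scan_fast(struct mount *mp, struct vnode *vp, void *data)
{
	/* skip vnodes with no dirty buffers without locking them */
	if (RB_EMPTY(&vp->v_rbdirty_tree))
		return (-1);
	return (0);
}

static int
example_scan_slow(struct mount *mp, struct vnode *vp, void *data)
{
	/* vnode is vget()'d here (VMSC_GETVP); return 0 to keep scanning */
	return (0);
}

/* usage: vmntvnodescan(mp, VMSC_GETVP, example_scan_fast, example_scan_slow, NULL); */
#endif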
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_sysref.refcnt exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;		/* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
	int flags;
	int busy;
	thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
	struct thread *td = curthread;	/* XXX */
	struct vnode *rootvp = NULL;
	int error;
	struct vflush_info vflush_info;

	if (rootrefs > 0) {
		KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
			("vflush: bad args"));
		/*
		 * Get the filesystem root vnode.  We can vput() it
		 * immediately, since with rootrefs > 0, it won't go away.
		 */
		if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
			if ((flags & FORCECLOSE) == 0)
				return (error);
			rootrefs = 0;
			/* continue anyway */
		}
		if (rootrefs)
			vput(rootvp);
	}

	vflush_info.busy = 0;
	vflush_info.flags = flags;
	vflush_info.td = td;
	vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

	if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
		/*
		 * If just the root vnode is busy, and if its refcount
		 * is equal to `rootrefs', then go ahead and kill it.
		 */
		KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
		KASSERT(rootvp->v_sysref.refcnt >= rootrefs, ("vflush: rootrefs"));
		if (vflush_info.busy == 1 && rootvp->v_sysref.refcnt == rootrefs) {
			vx_lock(rootvp);
			vgone_vxlocked(rootvp);
			vx_unlock(rootvp);
			vflush_info.busy = 0;
		}
	}
	if (vflush_info.busy)
		return (EBUSY);
	for (; rootrefs > 0; rootrefs--)
		vrele(rootvp);
	return (0);
}
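
/*
 * Example (illustrative sketch, not part of the original file): how a
 * filesystem's unmount path might drive vflush().  The rootrefs value of
 * 1 assumes the hypothetical filesystem keeps one cached reference on its
 * root vnode; FORCECLOSE is passed through on forced unmounts.  All names
 * here are hypothetical.
 */
#if 0
static int
examplefs_unmount(struct mount *mp, int mntflags)
{
	int flags = 0;
	int error;

	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	error = vflush(mp, 1, flags);	/* 1: our cached root vnode ref */
	if (error)
		return (error);
	/* ... release filesystem-specific state ... */
	return (0);
}
#endif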
/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
	struct vflush_info *info = data;
	struct vattr vattr;
	int flags = info->flags;

	/*
	 * Skip over vnodes marked VSYSTEM.
	 */
	if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
		return(0);
	}

	/*
	 * Do not force-close VCHR or VBLK vnodes
	 */
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		flags &= ~(WRITECLOSE|FORCECLOSE);

	/*
	 * If WRITECLOSE is set, flush out unlinked but still open
	 * files (even if open only for reading) and regular file
	 * vnodes open for writing.
	 */
	if ((flags & WRITECLOSE) &&
	    (vp->v_type == VNON ||
	    (VOP_GETATTR(vp, &vattr) == 0 &&
	    vattr.va_nlink > 0)) &&
	    (vp->v_writecount == 0 || vp->v_type != VREG)) {
		return(0);
	}

	/*
	 * If we are the only holder (refcnt of 1) or the vnode is in
	 * termination (refcnt < 0), we can vgone the vnode.
	 */
	if (vp->v_sysref.refcnt <= 1) {
		vgone_vxlocked(vp);
		return(0);
	}

	/*
	 * If FORCECLOSE is set, forcibly destroy the vnode and then move
	 * it to a dummymount structure so vop_*() functions don't deref
	 * a NULL pointer.
	 */
	if (flags & FORCECLOSE) {
		vhold(vp);
		vgone_vxlocked(vp);
		if (vp->v_mount == NULL)
			insmntque(vp, &dummymount);
		vdrop(vp);
		return(0);
	}
	if (vp->v_type == VCHR || vp->v_type == VBLK)
		kprintf("vflush: Warning, cannot destroy busy device vnode\n");
#ifdef DIAGNOSTIC
	if (busyprt)
		vprint("vflush: busy vnode", vp);
#endif
	++info->busy;
	return(0);
}
void
add_bio_ops(struct bio_ops *ops)
{
	TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
	TAILQ_REMOVE(&bio_ops_list, ops, entry);
}
/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 * io_sync is called with a NULL mount from the general syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
	struct bio_ops *ops;

	if (mp) {
		if ((ops = mp->mnt_bioops) != NULL)
			ops->io_sync(mp);
	} else {
		TAILQ_FOREACH(ops, &bio_ops_list, entry) {
			ops->io_sync(NULL);
		}
	}
}
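
/*
 * Example (illustrative sketch, not part of the original file): a
 * subsystem registering a bio_ops with an io_sync callback.  Only the
 * io_sync member is confirmed by this file; the names below are
 * hypothetical.
 */
#if 0
static void example_io_sync(struct mount *mp);

static struct bio_ops example_bioops = {
	.io_sync = example_io_sync,
};

/* add_bio_ops(&example_bioops) at init, rem_bio_ops(&example_bioops) at teardown */
#endif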
/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
	struct mount *mp = NULL;

	lwkt_gettoken(&mountlist_token);
	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (ncp == mp->mnt_ncmountpt.ncp)
			break;
	}
	lwkt_reltoken(&mountlist_token);
	return (mp);
}