[dragonfly.git] / sys / kern / vfs_mount.c
/*
 * Copyright (c) 2004,2013 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * External virtual filesystem routines
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
struct mountscan_info {
        TAILQ_ENTRY(mountscan_info) msi_entry;
        int msi_how;
        struct mount *msi_node;
};

struct vmntvnodescan_info {
        TAILQ_ENTRY(vmntvnodescan_info) entry;
        struct vnode *vp;
};

struct vnlru_info {
        int pass;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
           &vnlru_nowhere, 0,
           "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;
static struct mount dummymount;

/* note: mountlist exported to pstat */
struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;

static TAILQ_HEAD(,bio_ops) bio_ops_list = TAILQ_HEAD_INITIALIZER(bio_ops_list);
/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
        lwkt_token_init(&mountlist_token, "mntlist");
        lwkt_token_init(&mntid_token, "mntid");
        TAILQ_INIT(&mountscan_list);
        mount_init(&dummymount);
        dummymount.mnt_flag |= MNT_RDONLY;
        dummymount.mnt_kern_flag |= MNTK_ALL_MPSAFE;
}
/*
 * Support function called to remove a vnode from the mountlist and
 * deal with side effects for scans in progress.
 *
 * Target mnt_token is held on call.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
        struct vmntvnodescan_info *info;
        struct mount *mp = vp->v_mount;

        TAILQ_FOREACH(info, &mp->mnt_vnodescan_list, entry) {
                if (info->vp == vp)
                        info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}
/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should setup the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
            struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        KKASSERT(mp != NULL);

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;

        /*
         * By default the vnode is assigned the mount point's normal
         * operations vector.
         */
        vp->v_ops = &mp->mnt_vn_use_ops;
        vp->v_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}
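
/*
 * Illustrative sketch (added; not part of the original source): a
 * filesystem typically allocates a vnode, fills in its private fields,
 * and then either vx_put()s it or keeps the vref and drops only the VX
 * lock.  Here "ip" stands in for a hypothetical fs-private inode:
 *
 *        struct vnode *vp;
 *
 *        getnewvnode(VT_UFS, mp, &vp, VLKTIMEOUT, 0);
 *        vp->v_type = VREG;
 *        vp->v_data = ip;
 *        vx_unlock(vp);
 *
 * vx_unlock() leaves the vref intact for the caller; vx_put() would
 * release both the lock and the ref.
 */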
/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out but is still used by vfs_conf to
 * create vnodes for devices prior to the root mount (with mp == NULL).
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
                struct vop_ops **ops,
                struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;
        vp->v_ops = ops;

        if (mp == NULL)
                mp = &dummymount;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}
/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags)
{
        int lkflags;

        atomic_add_int(&mp->mnt_refs, 1);
        lwkt_gettoken(&mp->mnt_token);
        if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
                if (flags & LK_NOWAIT) {
                        lwkt_reltoken(&mp->mnt_token);
                        atomic_add_int(&mp->mnt_refs, -1);
                        return (ENOENT);
                }
                /* XXX not MP safe */
                mp->mnt_kern_flag |= MNTK_MWAIT;

                /*
                 * Since all busy locks are shared except the exclusive
                 * lock granted when unmounting, the only place that a
                 * wakeup needs to be done is at the release of the
                 * exclusive lock at the end of dounmount.
                 *
                 * WARNING! mp can potentially go away once we release
                 *          our ref.
                 */
                tsleep((caddr_t)mp, 0, "vfs_busy", 0);
                lwkt_reltoken(&mp->mnt_token);
                atomic_add_int(&mp->mnt_refs, -1);
                return (ENOENT);
        }
        lkflags = LK_SHARED;
        if (lockmgr(&mp->mnt_lock, lkflags))
                panic("vfs_busy: unexpected lock failure");
        lwkt_reltoken(&mp->mnt_token);
        return (0);
}
/*
 * Free a busy filesystem.
 *
 * Once refs is decremented the mount point can potentially get ripped
 * out from under us, but we want to clean up our refs before unlocking
 * so do a hold/drop around the whole mess.
 *
 * This is not in the critical path (I hope).
 */
void
vfs_unbusy(struct mount *mp)
{
        mount_hold(mp);
        atomic_add_int(&mp->mnt_refs, -1);
        lockmgr(&mp->mnt_lock, LK_RELEASE);
        mount_drop(mp);
}
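
/*
 * Illustrative sketch (added; not from the original source): the usual
 * caller pattern brackets an operation with vfs_busy()/vfs_unbusy() so
 * the filesystem cannot be unmounted out from under it.  LK_NOWAIT makes
 * the attempt fail with ENOENT instead of sleeping when an unmount is
 * already in progress:
 *
 *        if (vfs_busy(mp, LK_NOWAIT) != 0)
 *                return (ENOENT);
 *        error = VFS_STATFS(mp, &mp->mnt_stat, cred);
 *        vfs_unbusy(mp);
 */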
/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
        struct vfsconf *vfsp;
        struct mount *mp;

        if (fstypename == NULL)
                return (ENODEV);

        vfsp = vfsconf_find_by_name(fstypename);
        if (vfsp == NULL)
                return (ENODEV);
        mp = kmalloc(sizeof(struct mount), M_MOUNT, M_WAITOK | M_ZERO);
        mount_init(mp);
        lockinit(&mp->mnt_lock, "vfslock", VLKTIMEOUT, 0);

        vfs_busy(mp, 0);
        mp->mnt_vfc = vfsp;
        mp->mnt_op = vfsp->vfc_vfsops;
        mp->mnt_pbuf_count = nswbuf_kva / NSWBUF_SPLIT;
        vfsp->vfc_refcount++;
        mp->mnt_stat.f_type = vfsp->vfc_typenum;
        mp->mnt_flag |= MNT_RDONLY;
        mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
        strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
        copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);

        /*
         * Pre-set MPSAFE flags for VFS_MOUNT() call.
         */
        if (vfsp->vfc_flags & VFCF_MPSAFE)
                mp->mnt_kern_flag |= MNTK_ALL_MPSAFE;

        *mpp = mp;

        return (0);
}
/*
 * Basic mount structure initialization
 */
void
mount_init(struct mount *mp)
{
        lockinit(&mp->mnt_lock, "vfslock", hz*5, 0);
        lwkt_token_init(&mp->mnt_token, "permnt");

        TAILQ_INIT(&mp->mnt_vnodescan_list);
        TAILQ_INIT(&mp->mnt_nvnodelist);
        TAILQ_INIT(&mp->mnt_reservedvnlist);
        TAILQ_INIT(&mp->mnt_jlist);
        mp->mnt_nvnodelistsize = 0;
        mp->mnt_flag = 0;
        mp->mnt_hold = 1;               /* hold for umount last drop */
        mp->mnt_iosize_max = MAXPHYS;
        vn_syncer_thr_create(mp);
}
void
mount_hold(struct mount *mp)
{
        atomic_add_int(&mp->mnt_hold, 1);
}

void
mount_drop(struct mount *mp)
{
        if (atomic_fetchadd_int(&mp->mnt_hold, -1) == 1) {
                KKASSERT(mp->mnt_refs == 0);
                kfree(mp, M_MOUNT);
        }
}
/*
 * Lookup a mount point by filesystem identifier.
 *
 * If not NULL, the returned mp is held and the caller is expected to drop
 * it via mount_drop().
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
        struct mount *mp;

        lwkt_gettoken_shared(&mountlist_token);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
                    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
                        mount_hold(mp);
                        break;
                }
        }
        lwkt_reltoken(&mountlist_token);
        return (mp);
}
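
/*
 * Illustrative sketch (added; not from the original source): since the
 * mount returned by vfs_getvfs() is held, every successful lookup must
 * be paired with a mount_drop():
 *
 *        struct mount *mp;
 *
 *        if ((mp = vfs_getvfs(&fsid)) != NULL) {
 *                ... use mp; it cannot be freed while held ...
 *                mount_drop(mp);
 *        }
 */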
/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators
 * only support 16-bit device numbers.  We end up with unique val[0]'s for
 * the first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8
 * calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
        static u_int16_t mntid_base;
        struct mount *mptmp;
        fsid_t tfsid;
        int mtype;

        lwkt_gettoken(&mntid_token);
        mtype = mp->mnt_vfc->vfc_typenum;
        tfsid.val[1] = mtype;
        mtype = (mtype & 0xFF) << 24;
        for (;;) {
                tfsid.val[0] = makeudev(255,
                    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
                mntid_base++;
                mptmp = vfs_getvfs(&tfsid);
                if (mptmp == NULL)
                        break;
                mount_drop(mptmp);
        }
        mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
        mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
        lwkt_reltoken(&mntid_token);
}
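
/*
 * Illustration (added; not from the original source): for a filesystem
 * type number t and the rolling counter m (mntid_base), the minor
 * number handed to makeudev() above is composed as
 *
 *        ((t & 0xFF) << 24) | ((m & 0xFF00) << 8) | (m & 0xFF)
 *
 * i.e. the low byte of m occupies bits 0-7 and its high byte bits
 * 16-23, which is what keeps the generated val[0] values unique
 * mod 2^16 for the first 2^8 calls as described above.
 */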
/*
 * Set the FSID for a new mount point to the template.  Adjust
 * the FSID to avoid collisions.
 */
int
vfs_setfsid(struct mount *mp, fsid_t *template)
{
        struct mount *mptmp;
        int didmunge = 0;

        bzero(&mp->mnt_stat.f_fsid, sizeof(mp->mnt_stat.f_fsid));

        lwkt_gettoken(&mntid_token);
        for (;;) {
                mptmp = vfs_getvfs(template);
                if (mptmp == NULL)
                        break;
                mount_drop(mptmp);
                didmunge = 1;
                ++template->val[1];
        }
        mp->mnt_stat.f_fsid = *template;
        lwkt_reltoken(&mntid_token);

        return(didmunge);
}
/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrurecycle() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;

static void
vnlru_proc(void)
{
        struct thread *td = curthread;

        EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
                              SHUTDOWN_PRI_FIRST);

        for (;;) {
                int ncachedandinactive;

                kproc_suspend_loop();

                /*
                 * Try to free some vnodes if we have too many.  Trigger based
                 * on potentially freeable vnodes but calculate the count
                 * based on total vnodes.
                 *
                 * (long) -> deal with 64 bit machines, intermediate overflow
                 */
                synchronizevnodecount();
                ncachedandinactive = countcachedandinactivevnodes();
                if (numvnodes >= maxvnodes * 9 / 10 &&
                    ncachedandinactive >= maxvnodes * 5 / 10) {
                        int count = numvnodes - maxvnodes * 9 / 10;

                        if (count > (ncachedandinactive) / 100)
                                count = (ncachedandinactive) / 100;
                        if (count < 5)
                                count = 5;
                        freesomevnodes(count);
                }

                /*
                 * Do non-critical-path (more robust) cache cleaning,
                 * even if vnode counts are nominal, to try to avoid
                 * having to do it in the critical path.
                 */
                cache_hysteresis(0);

                /*
                 * Nothing to do if most of our vnodes are already on
                 * the free list.
                 */
                synchronizevnodecount();
                ncachedandinactive = countcachedandinactivevnodes();
                if (numvnodes <= maxvnodes * 9 / 10 ||
                    ncachedandinactive <= maxvnodes * 5 / 10) {
                        tsleep(vnlruthread, 0, "vlruwt", hz);
                        continue;
                }
        }
}
/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
        lwkt_gettoken(&mountlist_token);
        if (how == MNTINS_FIRST)
                TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
        else
                TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
        lwkt_reltoken(&mountlist_token);
}
/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 *
 * The function is expected to be very short-lived.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
        int error;

        lwkt_gettoken(&mountlist_token);
        error = callback(mp);
        lwkt_reltoken(&mountlist_token);
        return (error);
}
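
/*
 * Illustrative sketch (added; not from the original source): a
 * short-lived check run serialized against other interlocked callers.
 * The helper name is hypothetical:
 *
 *        static int
 *        is_first_mount(struct mount *mp)
 *        {
 *                return (TAILQ_FIRST(&mountlist) == mp);
 *        }
 *
 *        if (mountlist_interlock(is_first_mount, mp))
 *                ... mp is the head of the mountlist ...
 */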
/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
        return(TAILQ_FIRST(&mountlist));
}
/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
        struct mountscan_info *msi;

        lwkt_gettoken(&mountlist_token);
        TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
                if (msi->msi_node == mp) {
                        if (msi->msi_how & MNTSCAN_FORWARD)
                                msi->msi_node = TAILQ_NEXT(mp, mnt_list);
                        else
                                msi->msi_node = TAILQ_PREV(mp, mntlist,
                                                           mnt_list);
                }
        }
        TAILQ_REMOVE(&mountlist, mp, mnt_list);
        lwkt_reltoken(&mountlist_token);
}
/*
 * mountlist_exists (MP SAFE)
 *
 * Checks if a node exists in the mountlist.
 * This function is mainly used by VFS quota code to check if a
 * cached nullfs struct mount pointer is still valid at use time.
 *
 * FIXME: there is no guarantee the mp passed to this function
 * will be the same one used by VFS_ACCOUNT() later.
 */
int
mountlist_exists(struct mount *mp)
{
        int node_exists = 0;
        struct mount *lmp;

        lwkt_gettoken_shared(&mountlist_token);
        TAILQ_FOREACH(lmp, &mountlist, mnt_list) {
                if (lmp == mp) {
                        node_exists = 1;
                        break;
                }
        }
        lwkt_reltoken(&mountlist_token);

        return(node_exists);
}
/*
 * mountlist_scan
 *
 * Safely scan the mount points on the mount list.  Each mountpoint
 * is held across the callback.  The callback is responsible for
 * acquiring any further tokens or locks.
 *
 * Unless otherwise specified each mount point will be busied prior to the
 * callback and unbusied afterwards.  The callback may safely remove any
 * mount point without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 *
 * NOTE: mountlist_token is not held across the callback.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
        struct mountscan_info info;
        struct mount *mp;
        int count;
        int res;

        lwkt_gettoken(&mountlist_token);
        info.msi_how = how;
        info.msi_node = NULL;   /* paranoia */
        TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);
        lwkt_reltoken(&mountlist_token);

        res = 0;
        lwkt_gettoken_shared(&mountlist_token);

        if (how & MNTSCAN_FORWARD) {
                info.msi_node = TAILQ_FIRST(&mountlist);
                while ((mp = info.msi_node) != NULL) {
                        mount_hold(mp);
                        if (how & MNTSCAN_NOBUSY) {
                                lwkt_reltoken(&mountlist_token);
                                count = callback(mp, data);
                                lwkt_gettoken_shared(&mountlist_token);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                lwkt_reltoken(&mountlist_token);
                                count = callback(mp, data);
                                lwkt_gettoken_shared(&mountlist_token);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        mount_drop(mp);
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_NEXT(mp, mnt_list);
                }
        } else if (how & MNTSCAN_REVERSE) {
                info.msi_node = TAILQ_LAST(&mountlist, mntlist);
                while ((mp = info.msi_node) != NULL) {
                        mount_hold(mp);
                        if (how & MNTSCAN_NOBUSY) {
                                lwkt_reltoken(&mountlist_token);
                                count = callback(mp, data);
                                lwkt_gettoken_shared(&mountlist_token);
                        } else if (vfs_busy(mp, LK_NOWAIT) == 0) {
                                lwkt_reltoken(&mountlist_token);
                                count = callback(mp, data);
                                lwkt_gettoken_shared(&mountlist_token);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp);
                        } else {
                                count = 0;
                        }
                        mount_drop(mp);
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_PREV(mp, mntlist,
                                                           mnt_list);
                }
        }
        lwkt_reltoken(&mountlist_token);

        lwkt_gettoken(&mountlist_token);
        TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
        lwkt_reltoken(&mountlist_token);

        return(res);
}
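
/*
 * Illustrative sketch (added; not from the original source): counting
 * mount points of one filesystem type.  Each callback return value >= 0
 * is added to the aggregate that mountlist_scan() returns; the callback
 * name is hypothetical:
 *
 *        static int
 *        count_fstype(struct mount *mp, void *data)
 *        {
 *                return (strcmp(mp->mnt_stat.f_fstypename,
 *                               (const char *)data) == 0);
 *        }
 *
 *        n = mountlist_scan(count_fstype, "tmpfs", MNTSCAN_FORWARD);
 */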
/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
        "vnlru",
        vnlru_proc,
        &vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp);
/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
        struct mount *omp;

        /*
         * Delete from old mount point vnode list, if on one.
         */
        if ((omp = vp->v_mount) != NULL) {
                lwkt_gettoken(&omp->mnt_token);
                KKASSERT(omp == vp->v_mount);
                KASSERT(omp->mnt_nvnodelistsize > 0,
                        ("bad mount point vnode list size"));
                vremovevnodemnt(vp);
                omp->mnt_nvnodelistsize--;
                lwkt_reltoken(&omp->mnt_token);
        }

        /*
         * Insert into list of vnodes for the new mount point, if available.
         * The 'end' of the LRU list is the vnode prior to mp->mnt_syncer.
         */
        if (mp == NULL) {
                vp->v_mount = NULL;
                return;
        }
        lwkt_gettoken(&mp->mnt_token);
        vp->v_mount = mp;
        if (mp->mnt_syncer) {
                TAILQ_INSERT_BEFORE(mp->mnt_syncer, vp, v_nmntvnodes);
        } else {
                TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
        }
        mp->mnt_nvnodelistsize++;
        lwkt_reltoken(&mp->mnt_token);
}
/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * WARNING! The fastfunc() should not indirect through vp->v_object, the vp
 *	    data structure is unstable when called from fastfunc().
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
        struct vmntvnodescan_info info;
        struct vnode *vp;
        int r = 0;
        int maxcount = mp->mnt_nvnodelistsize * 2;
        int stopcount = 0;
        int count = 0;

        lwkt_gettoken(&mp->mnt_token);

        /*
         * If asked to do one pass stop after iterating available vnodes.
         * Under heavy loads new vnodes can be added while we are scanning,
         * so this isn't perfect.  Create a slop factor of 2x.
         */
        if (flags & VMSC_ONEPASS)
                stopcount = mp->mnt_nvnodelistsize;

        info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
        TAILQ_INSERT_TAIL(&mp->mnt_vnodescan_list, &info, entry);

        while ((vp = info.vp) != NULL) {
                if (--maxcount == 0) {
                        kprintf("Warning: excessive fssync iteration\n");
                        maxcount = mp->mnt_nvnodelistsize * 2;
                }

                /*
                 * Skip if visible but not ready, or special (e.g.
                 * mp->mnt_syncer)
                 */
                if (vp->v_type == VNON)
                        goto next;
                KKASSERT(vp->v_mount == mp);

                /*
                 * Quick test.  A negative return continues the loop without
                 * calling the slow test.  0 continues onto the slow test.
                 * A positive number aborts the loop.
                 */
                if (fastfunc) {
                        if ((r = fastfunc(mp, vp, data)) < 0) {
                                r = 0;
                                goto next;
                        }
                        if (r)
                                break;
                }

                /*
                 * Get a vxlock on the vnode, retry if it has moved or isn't
                 * in the mountlist where we expect it.
                 */
                if (slowfunc) {
                        int error;

                        switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
                        case VMSC_GETVP:
                                error = vget(vp, LK_EXCLUSIVE);
                                break;
                        case VMSC_GETVP|VMSC_NOWAIT:
                                error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT);
                                break;
                        case VMSC_GETVX:
                                vx_get(vp);
                                error = 0;
                                break;
                        default:
                                error = 0;
                                break;
                        }
                        if (error)
                                goto next;

                        /*
                         * Do not call the slow function if the vnode is
                         * invalid or if it was ripped out from under us
                         * while we (potentially) blocked.
                         */
                        if (info.vp == vp && vp->v_type != VNON)
                                r = slowfunc(mp, vp, data);

                        /*
                         * Cleanup
                         */
                        switch(flags & (VMSC_GETVP|VMSC_GETVX|VMSC_NOWAIT)) {
                        case VMSC_GETVP:
                        case VMSC_GETVP|VMSC_NOWAIT:
                                vput(vp);
                                break;
                        case VMSC_GETVX:
                                vx_put(vp);
                                break;
                        default:
                                break;
                        }
                        if (r != 0)
                                break;
                }

next:
                /*
                 * Yield after some processing.  Depending on the number
                 * of vnodes, we might wind up running for a long time.
                 * Because threads are not preemptable, time critical
                 * userland processes might starve.  Give them a chance
                 * now and then.
                 */
                if (++count == 10000) {
                        /*
                         * We really want to yield a bit, so we simply
                         * sleep a tick
                         */
                        tsleep(mp, 0, "vnodescn", 1);
                        count = 0;
                }

                /*
                 * If doing one pass this decrements to zero.  If it starts
                 * at zero it is effectively unlimited for the purposes of
                 * this loop.
                 */
                if (--stopcount == 0)
                        break;

                /*
                 * Iterate.  If the vnode was ripped out from under us
                 * info.vp will already point to the next vnode, otherwise
                 * we have to obtain the next valid vnode ourselves.
                 */
                if (info.vp == vp)
                        info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }

        TAILQ_REMOVE(&mp->mnt_vnodescan_list, &info, entry);
        lwkt_reltoken(&mp->mnt_token);
        return(r);
}
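
/*
 * Illustrative sketch (added; not from the original source): a scan
 * that prefilters cheaply in the fastfunc (which may not block) and
 * does the real work in the slowfunc with the vnode held via vget().
 * The function names are hypothetical:
 *
 *        static int
 *        scan_fast(struct mount *mp, struct vnode *vp, void *data)
 *        {
 *                return (vp->v_type == VREG ? 0 : -1);
 *        }
 *
 *        static int
 *        scan_slow(struct mount *mp, struct vnode *vp, void *data)
 *        {
 *                return (0);
 *        }
 *
 *        vmntvnodescan(mp, VMSC_GETVP | VMSC_NOWAIT, scan_fast, scan_slow,
 *                      NULL);
 *
 * scan_fast returning -1 skips the slowfunc for non-regular vnodes;
 * scan_slow runs with vp locked and may block; a non-zero return from
 * it would terminate the scan.
 */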
/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error). If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem. The root vnode is considered busy if its
 * v_refcnt exceeds this value. On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;         /* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif
static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
        int flags;
        int busy;
        thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
        struct thread *td = curthread;  /* XXX */
        struct vnode *rootvp = NULL;
        int error;
        struct vflush_info vflush_info;

        if (rootrefs > 0) {
                KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
                        ("vflush: bad args"));
                /*
                 * Get the filesystem root vnode. We can vput() it
                 * immediately, since with rootrefs > 0, it won't go away.
                 */
                if ((error = VFS_ROOT(mp, &rootvp)) != 0) {
                        if ((flags & FORCECLOSE) == 0)
                                return (error);
                        rootrefs = 0;
                        /* continue anyway */
                }
                if (rootrefs)
                        vput(rootvp);
        }

        vflush_info.busy = 0;
        vflush_info.flags = flags;
        vflush_info.td = td;
        vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

        if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
                /*
                 * If just the root vnode is busy, and if its refcount
                 * is equal to `rootrefs', then go ahead and kill it.
                 */
                KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
                KASSERT(VREFCNT(rootvp) >= rootrefs, ("vflush: rootrefs"));
                if (vflush_info.busy == 1 && VREFCNT(rootvp) == rootrefs) {
                        vx_lock(rootvp);
                        vgone_vxlocked(rootvp);
                        vx_unlock(rootvp);
                        vflush_info.busy = 0;
                }
        }
        if (vflush_info.busy)
                return (EBUSY);
        for (; rootrefs > 0; rootrefs--)
                vrele(rootvp);
        return (0);
}
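
/*
 * Illustrative sketch (added; not from the original source): a typical
 * VFS unmount path flushes every vnode except the one root-vnode
 * reference it still owns, forcing the issue only when MNT_FORCE was
 * requested:
 *
 *        error = vflush(mp, 1, (mntflags & MNT_FORCE) ? FORCECLOSE : 0);
 *        if (error)
 *                return (error);
 */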
/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
        struct vflush_info *info = data;
        struct vattr vattr;
        int flags = info->flags;

        /*
         * Generally speaking try to deactivate on 0 refs (catch-all)
         */
        atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);

        /*
         * Skip over vnodes marked VSYSTEM.
         */
        if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
                return(0);
        }

        /*
         * Do not force-close VCHR or VBLK vnodes
         */
        if (vp->v_type == VCHR || vp->v_type == VBLK)
                flags &= ~(WRITECLOSE|FORCECLOSE);

        /*
         * If WRITECLOSE is set, flush out unlinked but still open
         * files (even if open only for reading) and regular file
         * vnodes open for writing.
         */
        if ((flags & WRITECLOSE) &&
            (vp->v_type == VNON ||
            (VOP_GETATTR(vp, &vattr) == 0 &&
            vattr.va_nlink > 0)) &&
            (vp->v_writecount == 0 || vp->v_type != VREG)) {
                return(0);
        }

        /*
         * If we are the only holder (refcnt of 1) or the vnode is in
         * termination (refcnt < 0), we can vgone the vnode.
         */
        if (VREFCNT(vp) <= 1) {
                vgone_vxlocked(vp);
                return(0);
        }

        /*
         * If FORCECLOSE is set, forcibly destroy the vnode and then move
         * it to a dummymount structure so vop_*() functions don't deref
         * a NULL pointer.
         */
        if (flags & FORCECLOSE) {
                vhold(vp);
                vgone_vxlocked(vp);
                if (vp->v_mount == NULL)
                        insmntque(vp, &dummymount);
                vdrop(vp);
                return(0);
        }
        if (vp->v_type == VCHR || vp->v_type == VBLK)
                kprintf("vflush: Warning, cannot destroy busy device vnode\n");
#ifdef DIAGNOSTIC
        if (busyprt)
                vprint("vflush: busy vnode", vp);
#endif
        ++info->busy;
        return(0);
}
void
add_bio_ops(struct bio_ops *ops)
{
        TAILQ_INSERT_TAIL(&bio_ops_list, ops, entry);
}

void
rem_bio_ops(struct bio_ops *ops)
{
        TAILQ_REMOVE(&bio_ops_list, ops, entry);
}
/*
 * This calls the bio_ops io_sync function either for a mount point
 * or generally.
 *
 * WARNING: softdeps is weirdly coded and just isn't happy unless
 *	    io_sync is called with a NULL mount from the general
 *	    syncing code.
 */
void
bio_ops_sync(struct mount *mp)
{
        struct bio_ops *ops;

        if (mp) {
                if ((ops = mp->mnt_bioops) != NULL)
                        ops->io_sync(mp);
        } else {
                TAILQ_FOREACH(ops, &bio_ops_list, entry) {
                        ops->io_sync(NULL);
                }
        }
}
/*
 * Lookup a mount point by nch
 */
struct mount *
mount_get_by_nc(struct namecache *ncp)
{
        struct mount *mp = NULL;

        lwkt_gettoken_shared(&mountlist_token);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (ncp == mp->mnt_ncmountpt.ncp)
                        break;
        }
        lwkt_reltoken(&mountlist_token);

        return (mp);
}