/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_mount.c,v 1.10 2005/04/20 17:01:50 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/eventhandler.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <sys/buf2.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

struct mountscan_info {
        TAILQ_ENTRY(mountscan_info) msi_entry;
        int msi_how;
        struct mount *msi_node;
};

struct vmntvnodescan_info {
        TAILQ_ENTRY(vmntvnodescan_info) entry;
        struct vnode *vp;
};

static int vnlru_nowhere = 0;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RD,
            &vnlru_nowhere, 0,
            "Number of times the vnlru process ran without success");

static struct lwkt_token mntid_token;

static struct mntlist mountlist = TAILQ_HEAD_INITIALIZER(mountlist);
static TAILQ_HEAD(,mountscan_info) mountscan_list;
static struct lwkt_token mountlist_token;
static TAILQ_HEAD(,vmntvnodescan_info) mntvnodescan_list;
struct lwkt_token mntvnode_token;

/*
 * Called from vfsinit()
 */
void
vfs_mount_init(void)
{
        lwkt_token_init(&mountlist_token);
        lwkt_token_init(&mntvnode_token);
        lwkt_token_init(&mntid_token);
        TAILQ_INIT(&mountscan_list);
        TAILQ_INIT(&mntvnodescan_list);
}

/*
 * Support function called with mntvnode_token held to remove a vnode
 * from the mountlist.  We must update any list scans which are in progress.
 */
static void
vremovevnodemnt(struct vnode *vp)
{
        struct vmntvnodescan_info *info;

        TAILQ_FOREACH(info, &mntvnodescan_list, entry) {
                if (info->vp == vp)
                        info->vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&vp->v_mount->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Support function called with mntvnode_token held to move a vnode to
 * the end of the list.
 */
static void
vmovevnodetoend(struct mount *mp, struct vnode *vp)
{
        vremovevnodemnt(vp);
        TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
}

/*
 * Allocate a new vnode and associate it with a tag, mount point, and
 * operations vector.
 *
 * A VX locked and refd vnode is returned.  The caller should setup the
 * remaining fields and vx_put() or, if he wishes to leave a vref,
 * vx_unlock() the vnode.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp,
            struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        KKASSERT(mp != NULL);

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;

        /*
         * By default the vnode is assigned the mount point's normal
         * operations vector.
         */
        vp->v_ops = &mp->mnt_vn_use_ops;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);
        vfs_object_create(vp, curthread);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}
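
/*
 * Illustrative sketch (not part of the original file): how a filesystem
 * might allocate and publish a new vnode via getnewvnode().  The tag
 * VT_UFS and the function name are placeholders chosen for the example.
 */
#if 0   /* example only */
static int
example_alloc_vnode(struct mount *mp, struct vnode **vpp)
{
        struct vnode *vp;
        int error;

        if ((error = getnewvnode(VT_UFS, mp, &vp, 0, 0)) != 0)
                return (error);
        vp->v_type = VREG;      /* leaving VNON would keep it unmolested */
        *vpp = vp;              /* returned VX locked and referenced */
        return (0);
}
#endif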

/*
 * This function creates vnodes with special operations vectors.  The
 * mount point is optional.
 *
 * This routine is being phased out.
 */
int
getspecialvnode(enum vtagtype tag, struct mount *mp,
                struct vop_ops **ops_pp,
                struct vnode **vpp, int lktimeout, int lkflags)
{
        struct vnode *vp;

        vp = allocvnode(lktimeout, lkflags);
        vp->v_tag = tag;
        vp->v_data = NULL;
        vp->v_ops = ops_pp;

        /*
         * Placing the vnode on the mount point's queue makes it visible.
         * VNON prevents it from being messed with, however.
         */
        insmntque(vp, mp);
        vfs_object_create(vp, curthread);

        /*
         * A VX locked & refd vnode is returned.
         */
        *vpp = vp;
        return (0);
}

/*
 * Interlock against an unmount, return 0 on success, non-zero on failure.
 *
 * The passed flag may be 0 or LK_NOWAIT and is only used if an unmount
 * is in-progress.
 *
 * If no unmount is in-progress LK_NOWAIT is ignored.  No other flag bits
 * are used.  A shared lock will be obtained and the filesystem will not
 * be unmountable until the lock is released.
 */
int
vfs_busy(struct mount *mp, int flags, struct thread *td)
{
        int lkflags;

        if (mp->mnt_kern_flag & MNTK_UNMOUNT) {
                if (flags & LK_NOWAIT)
                        return (ENOENT);
                /* XXX not MP safe */
                mp->mnt_kern_flag |= MNTK_MWAIT;
                /*
                 * Since all busy locks are shared except the exclusive
                 * lock granted when unmounting, the only place that a
                 * wakeup needs to be done is at the release of the
                 * exclusive lock at the end of dounmount.
                 */
                tsleep((caddr_t)mp, 0, "vfs_busy", 0);
                return (ENOENT);
        }
        lkflags = LK_SHARED | LK_NOPAUSE;
        if (lockmgr(&mp->mnt_lock, lkflags, NULL, td))
                panic("vfs_busy: unexpected lock failure");
        return (0);
}

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(struct mount *mp, struct thread *td)
{
        lockmgr(&mp->mnt_lock, LK_RELEASE, NULL, td);
}
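
/*
 * Illustrative sketch (not part of the original file): the usual
 * busy/unbusy pairing around an operation that must not race an unmount.
 * The function name is a placeholder.
 */
#if 0   /* example only */
static int
example_with_mount_busied(struct mount *mp)
{
        struct thread *td = curthread;

        if (vfs_busy(mp, LK_NOWAIT, td) != 0)
                return (ENOENT);        /* an unmount is in progress */
        /* mp holds a shared busy lock here and cannot finish unmounting */
        vfs_unbusy(mp, td);
        return (0);
}
#endif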

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
int
vfs_rootmountalloc(char *fstypename, char *devname, struct mount **mpp)
{
        struct thread *td = curthread;  /* XXX */
        struct vfsconf *vfsp;
        struct mount *mp;

        if (fstypename == NULL)
                return (ENODEV);
        for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
                if (!strcmp(vfsp->vfc_name, fstypename))
                        break;
        }
        if (vfsp == NULL)
                return (ENODEV);
        mp = malloc(sizeof(struct mount), M_MOUNT, M_WAITOK);
        bzero((char *)mp, (u_long)sizeof(struct mount));
        lockinit(&mp->mnt_lock, 0, "vfslock", VLKTIMEOUT, LK_NOPAUSE);
        vfs_busy(mp, LK_NOWAIT, td);
        TAILQ_INIT(&mp->mnt_nvnodelist);
        TAILQ_INIT(&mp->mnt_reservedvnlist);
        TAILQ_INIT(&mp->mnt_jlist);
        mp->mnt_nvnodelistsize = 0;
        mp->mnt_vfc = vfsp;
        mp->mnt_op = vfsp->vfc_vfsops;
        mp->mnt_flag = MNT_RDONLY;
        mp->mnt_vnodecovered = NULLVP;
        vfsp->vfc_refcount++;
        mp->mnt_iosize_max = DFLTPHYS;
        mp->mnt_stat.f_type = vfsp->vfc_typenum;
        mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;
        strncpy(mp->mnt_stat.f_fstypename, vfsp->vfc_name, MFSNAMELEN);
        copystr(devname, mp->mnt_stat.f_mntfromname, MNAMELEN - 1, 0);
        *mpp = mp;
        return (0);
}

/*
 * Lookup a mount point by filesystem identifier.
 */
struct mount *
vfs_getvfs(fsid_t *fsid)
{
        struct mount *mp;
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mountlist_token);
        TAILQ_FOREACH(mp, &mountlist, mnt_list) {
                if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] &&
                    mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) {
                        break;
                }
        }
        lwkt_reltoken(&ilock);
        return (mp);
}

/*
 * Get a new unique fsid.  Try to make its val[0] unique, since this value
 * will be used to create fake device numbers for stat().  Also try (but
 * not so hard) to make its val[0] unique mod 2^16, since some emulators only
 * support 16-bit device numbers.  We end up with unique val[0]'s for the
 * first 2^16 calls and unique val[0]'s mod 2^16 for the first 2^8 calls.
 *
 * Keep in mind that several mounts may be running in parallel.  Starting
 * the search one past where the previous search terminated is both a
 * micro-optimization and a defense against returning the same fsid to
 * different mounts.
 */
void
vfs_getnewfsid(struct mount *mp)
{
        static u_int16_t mntid_base;
        lwkt_tokref ilock;
        fsid_t tfsid;
        int mtype;

        lwkt_gettoken(&ilock, &mntid_token);
        mtype = mp->mnt_vfc->vfc_typenum;
        tfsid.val[1] = mtype;
        mtype = (mtype & 0xFF) << 24;
        for (;;) {
                tfsid.val[0] = makeudev(255,
                    mtype | ((mntid_base & 0xFF00) << 8) | (mntid_base & 0xFF));
                mntid_base++;
                if (vfs_getvfs(&tfsid) == NULL)
                        break;
        }
        mp->mnt_stat.f_fsid.val[0] = tfsid.val[0];
        mp->mnt_stat.f_fsid.val[1] = tfsid.val[1];
        lwkt_reltoken(&ilock);
}
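
/*
 * Worked example of the packing above (illustrative): with vfc_typenum 5
 * and mntid_base 0x1234 the minor number passed to makeudev() is
 * (5 << 24) | ((0x1234 & 0xff00) << 8) | (0x1234 & 0xff) == 0x05120034,
 * so successive calls vary the low byte first, keeping val[0] unique
 * mod 2^16 for the first 2^8 calls as described above.
 */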

/*
 * This routine is called when we have too many vnodes.  It attempts
 * to free <count> vnodes and will potentially free vnodes that still
 * have VM backing store (VM backing store is typically the cause
 * of a vnode blowout so we want to do this).  Therefore, this operation
 * is not considered cheap.
 *
 * A number of conditions may prevent a vnode from being reclaimed.
 * The buffer cache may have references on the vnode, a directory
 * vnode may still have references due to the namei cache representing
 * underlying files, or the vnode may be in active use.  It is not
 * desirable to reuse such vnodes.  These conditions may cause the
 * number of vnodes to reach some minimum value regardless of what
 * you set kern.maxvnodes to.  Do not set kern.maxvnodes too low.
 */

/*
 * This is a quick non-blocking check to determine if the vnode is a good
 * candidate for being (eventually) vgone()'d.  Returns 0 if the vnode is
 * not a good candidate, 1 if it is.
 *
 * vnodes marked VFREE are already on the free list, but may still need
 * to be recycled due to eating namecache resources and potentially blocking
 * the namecache directory chain and related vnodes from being freed.
 */
static __inline int
vmightfree(struct vnode *vp, int page_count)
{
        if (vp->v_flag & VRECLAIMED)
                return (0);
        if ((vp->v_flag & VFREE) && TAILQ_EMPTY(&vp->v_namecache))
                return (0);
        if (vp->v_usecount != 0)
                return (0);
        if (vp->v_object && vp->v_object->resident_page_count >= page_count)
                return (0);
        return (1);
}

/*
 * The vnode was found to be possibly vgone()able and the caller has locked it
 * (thus the usecount should be 1 now).  Determine if the vnode is actually
 * vgone()able, doing some cleanups in the process.  Returns 1 if the vnode
 * can be vgone()'d, 0 otherwise.
 *
 * Note that v_holdcnt may be non-zero because (A) this vnode is not a leaf
 * in the namecache topology and (B) this vnode has buffer cache bufs.
 * We cannot remove vnodes with non-leaf namecache associations.  We do a
 * tentative leaf check prior to attempting to flush out any buffers but the
 * 'real' test when all is said and done is that v_holdcnt must become 0 for
 * the vnode to be freeable.
 *
 * We could theoretically just unconditionally flush when v_holdcnt != 0,
 * but flushing data associated with non-leaf nodes (which are always
 * directories), just throws it away for no benefit.  It is the buffer
 * cache's responsibility to choose buffers to recycle from the cached
 * data point of view.
 */
static int
visleaf(struct vnode *vp)
{
        struct namecache *ncp;

        TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
                if (!TAILQ_EMPTY(&ncp->nc_list))
                        return(0);
        }
        return(1);
}

/*
 * Try to clean up the vnode to the point where it can be vgone()'d, returning
 * 0 if it cannot be vgone()'d (or already has been), 1 if it can.  Unlike
 * vmightfree() this routine may flush the vnode and block.  Vnodes marked
 * VFREE are still candidates for vgone()ing because they may hold namecache
 * resources and could be blocking the namecache directory hierarchy (and
 * related vnodes) from being freed.
 */
static int
vtrytomakegoneable(struct vnode *vp, int page_count)
{
        if (vp->v_flag & VRECLAIMED)
                return (0);
        if (vp->v_usecount != 1)
                return (0);
        if (vp->v_object && vp->v_object->resident_page_count >= page_count)
                return (0);
        if (vp->v_holdcnt && visleaf(vp)) {
                vinvalbuf(vp, V_SAVE, NULL, 0, 0);
#if 0   /* DEBUG */
                printf((vp->v_holdcnt ? "vrecycle: vp %p failed: %s\n" :
                        "vrecycle: vp %p succeeded: %s\n"), vp,
                        (TAILQ_FIRST(&vp->v_namecache) ?
                         TAILQ_FIRST(&vp->v_namecache)->nc_name : "?"));
#endif
        }
        return(vp->v_usecount == 1 && vp->v_holdcnt == 0);
}

/*
 * Reclaim up to 1/10 of the vnodes associated with a mount point.  Try
 * to avoid vnodes which have lots of resident pages (we are trying to free
 * vnodes, not memory).
 *
 * This routine is a callback from the mountlist scan.  The mount point
 * in question will be busied.
 */
static int
vlrureclaim(struct mount *mp, void *data)
{
        struct vnode *vp;
        lwkt_tokref ilock;
        int done;
        int trigger;
        int usevnodes;
        int count;
        int trigger_mult = vnlru_nowhere;

        /*
         * Calculate the trigger point for the resident pages check.  The
         * minimum trigger value is approximately the number of pages in
         * the system divided by the number of vnodes.  However, due to
         * various other system memory overheads unrelated to data caching
         * it is a good idea to double the trigger (at least).
         *
         * trigger_mult starts at 0.  If the recycler is having problems
         * finding enough freeable vnodes it will increase trigger_mult.
         * This should not happen in normal operation, even on machines with
         * low amounts of memory, but extraordinary memory use by the system
         * versus the amount of cached data can trigger it.
         */
        usevnodes = desiredvnodes;
        if (usevnodes <= 0)
                usevnodes = 1;
        trigger = vmstats.v_page_count * (trigger_mult + 2) / usevnodes;

        done = 0;
        lwkt_gettoken(&ilock, &mntvnode_token);
        count = mp->mnt_nvnodelistsize / 10 + 1;
        while (count && (vp = TAILQ_FIRST(&mp->mnt_nvnodelist)) != NULL) {
                /*
                 * __VNODESCAN__
                 *
                 * The VP will stick around while we hold mntvnode_token,
                 * at least until we block, so we can safely do an initial
                 * check, and then must check again after we lock the vnode.
                 */
                if (vp->v_type == VNON ||       /* XXX */
                    vp->v_type == VBAD ||       /* XXX */
                    !vmightfree(vp, trigger)    /* critical path opt */
                ) {
                        vmovevnodetoend(mp, vp);
                        --count;
                        continue;
                }

                /*
                 * VX get the candidate vnode.  If the VX get fails the
                 * vnode might still be on the mountlist.  Our loop depends
                 * on us at least cycling the vnode to the end of the
                 * mountlist.
                 */
                if (vx_get_nonblock(vp) != 0) {
                        if (vp->v_mount == mp)
                                vmovevnodetoend(mp, vp);
                        --count;
                        continue;
                }

                /*
                 * Since we blocked locking the vp, make sure it is still
                 * a candidate for reclamation.  That is, it has not already
                 * been reclaimed and only has our VX reference associated
                 * with it.
                 */
                if (vp->v_type == VNON ||       /* XXX */
                    vp->v_type == VBAD ||       /* XXX */
                    (vp->v_flag & VRECLAIMED) ||
                    vp->v_mount != mp ||
                    !vtrytomakegoneable(vp, trigger)    /* critical path opt */
                ) {
                        if (vp->v_mount == mp)
                                vmovevnodetoend(mp, vp);
                        --count;
                        vx_put(vp);
                        continue;
                }

                /*
                 * All right, we are good, move the vp to the end of the
                 * mountlist and clean it out.  The vget will have returned
                 * an error if the vnode was destroyed (VRECLAIMED set), so we
                 * do not have to check again.  The vput() will move the
                 * vnode to the free list if the vgone() was successful.
                 */
                KKASSERT(vp->v_mount == mp);
                vmovevnodetoend(mp, vp);
                vgone(vp);
                vx_put(vp);
                ++done;
                --count;
        }
        lwkt_reltoken(&ilock);
        return (done);
}
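
/*
 * Worked example of the trigger above (illustrative): with 262144 pages
 * of physical memory (1GB at 4KB/page), desiredvnodes == 65536 and
 * trigger_mult == 0, trigger = 262144 * 2 / 65536 == 8, so vnodes caching
 * 8 or more resident pages are passed over by this scan.
 */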

/*
 * Attempt to recycle vnodes in a context that is always safe to block.
 * Calling vlrureclaim() from the bowels of file system code has some
 * interesting deadlock problems.
 */
static struct thread *vnlruthread;
static int vnlruproc_sig;

void
vnlru_proc_wait(void)
{
        if (vnlruproc_sig == 0) {
                vnlruproc_sig = 1;      /* avoid unnecessary wakeups */
                wakeup(vnlruthread);
        }
        tsleep(&vnlruproc_sig, 0, "vlruwk", hz);
}

static void
vnlru_proc(void)
{
        struct thread *td = curthread;
        int done;
        int s;

        EVENTHANDLER_REGISTER(shutdown_pre_sync, shutdown_kproc, td,
            SHUTDOWN_PRI_FIRST);

        s = splbio();
        for (;;) {
                kproc_suspend_loop();
                if (numvnodes - freevnodes <= desiredvnodes * 9 / 10) {
                        vnlruproc_sig = 0;
                        wakeup(&vnlruproc_sig);
                        tsleep(td, 0, "vlruwt", hz);
                        continue;
                }
                cache_cleanneg(0);
                done = mountlist_scan(vlrureclaim, NULL, MNTSCAN_FORWARD);

                /*
                 * The vlrureclaim() call only processes 1/10 of the vnodes
                 * on each mount.  If we couldn't find any repeat the loop
                 * at least enough times to cover all available vnodes before
                 * we start sleeping.  Complain if the failure extends past
                 * 30 seconds, every 30 seconds.
                 */
                if (done == 0) {
                        ++vnlru_nowhere;
                        if (vnlru_nowhere % 10 == 0)
                                tsleep(td, 0, "vlrup", hz * 3);
                        if (vnlru_nowhere % 100 == 0)
                                printf("vnlru_proc: vnode recycler stopped working!\n");
                        if (vnlru_nowhere == 1000)
                                vnlru_nowhere = 900;
                } else {
                        vnlru_nowhere = 0;
                }
        }
        splx(s);
}

/*
 * MOUNTLIST FUNCTIONS
 */

/*
 * mountlist_insert (MP SAFE)
 *
 * Add a new mount point to the mount list.
 */
void
mountlist_insert(struct mount *mp, int how)
{
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mountlist_token);
        if (how == MNTINS_FIRST)
                TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list);
        else
                TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
        lwkt_reltoken(&ilock);
}

/*
 * mountlist_interlock (MP SAFE)
 *
 * Execute the specified interlock function with the mountlist token
 * held.  The function will be called in a serialized fashion versus
 * other functions called through this mechanism.
 */
int
mountlist_interlock(int (*callback)(struct mount *), struct mount *mp)
{
        lwkt_tokref ilock;
        int error;

        lwkt_gettoken(&ilock, &mountlist_token);
        error = callback(mp);
        lwkt_reltoken(&ilock);
        return (error);
}
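
/*
 * Illustrative sketch (not part of the original file): a callback run via
 * mountlist_interlock() executes with the mountlist token held, serialized
 * against other interlock callbacks.  The names below are placeholders.
 */
#if 0   /* example only */
static int
example_interlock(struct mount *mp)
{
        /* runs holding mountlist_token; keep it short and non-blocking */
        return ((mp->mnt_kern_flag & MNTK_UNMOUNT) ? EBUSY : 0);
}

static int
example_check_mount(struct mount *mp)
{
        return (mountlist_interlock(example_interlock, mp));
}
#endif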

/*
 * mountlist_boot_getfirst (DURING BOOT ONLY)
 *
 * This function returns the first mount on the mountlist, which is
 * expected to be the root mount.  Since no interlocks are obtained
 * this function is only safe to use during booting.
 */
struct mount *
mountlist_boot_getfirst(void)
{
        return(TAILQ_FIRST(&mountlist));
}

/*
 * mountlist_remove (MP SAFE)
 *
 * Remove a node from the mountlist.  If this node is the next scan node
 * for any active mountlist scans, the active mountlist scan will be
 * adjusted to skip the node, thus allowing removals during mountlist
 * scans.
 */
void
mountlist_remove(struct mount *mp)
{
        struct mountscan_info *msi;
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mountlist_token);
        TAILQ_FOREACH(msi, &mountscan_list, msi_entry) {
                if (msi->msi_node == mp) {
                        if (msi->msi_how & MNTSCAN_FORWARD)
                                msi->msi_node = TAILQ_NEXT(mp, mnt_list);
                        else
                                msi->msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
                }
        }
        TAILQ_REMOVE(&mountlist, mp, mnt_list);
        lwkt_reltoken(&ilock);
}

/*
 * mountlist_scan (MP SAFE)
 *
 * Safely scan the mount points on the mount list.  Unless otherwise
 * specified each mount point will be busied prior to the callback and
 * unbusied afterwards.  The callback may safely remove any mount point
 * without interfering with the scan.  If the current callback
 * mount is removed the scanner will not attempt to unbusy it.
 *
 * If a mount node cannot be busied it is silently skipped.
 *
 * The callback return value is aggregated and a total is returned.  A return
 * value of < 0 is not aggregated and will terminate the scan.
 *
 * MNTSCAN_FORWARD	- the mountlist is scanned in the forward direction
 * MNTSCAN_REVERSE	- the mountlist is scanned in reverse
 * MNTSCAN_NOBUSY	- the scanner will make the callback without busying
 *			  the mount node.
 */
int
mountlist_scan(int (*callback)(struct mount *, void *), void *data, int how)
{
        struct mountscan_info info;
        lwkt_tokref ilock;
        struct mount *mp;
        thread_t td;
        int count;
        int res;

        lwkt_gettoken(&ilock, &mountlist_token);

        info.msi_how = how;
        info.msi_node = NULL;   /* paranoia */
        TAILQ_INSERT_TAIL(&mountscan_list, &info, msi_entry);

        res = 0;
        td = curthread;

        if (how & MNTSCAN_FORWARD) {
                info.msi_node = TAILQ_FIRST(&mountlist);
                while ((mp = info.msi_node) != NULL) {
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT, td) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp, td);
                        } else {
                                count = 0;
                        }
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_NEXT(mp, mnt_list);
                }
        } else if (how & MNTSCAN_REVERSE) {
                info.msi_node = TAILQ_LAST(&mountlist, mntlist);
                while ((mp = info.msi_node) != NULL) {
                        if (how & MNTSCAN_NOBUSY) {
                                count = callback(mp, data);
                        } else if (vfs_busy(mp, LK_NOWAIT, td) == 0) {
                                count = callback(mp, data);
                                if (mp == info.msi_node)
                                        vfs_unbusy(mp, td);
                        } else {
                                count = 0;
                        }
                        if (count < 0)
                                break;
                        res += count;
                        if (mp == info.msi_node)
                                info.msi_node = TAILQ_PREV(mp, mntlist, mnt_list);
                }
        }
        TAILQ_REMOVE(&mountscan_list, &info, msi_entry);
        lwkt_reltoken(&ilock);
        return(res);
}
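
/*
 * Illustrative sketch (not part of the original file): counting read-only
 * mounts with mountlist_scan().  Non-negative callback returns are summed
 * into the scan's result; the names below are placeholders.
 */
#if 0   /* example only */
static int
example_count_rdonly_cb(struct mount *mp, void *data)
{
        /* called with mp busied (MNTSCAN_NOBUSY not given) */
        return ((mp->mnt_flag & MNT_RDONLY) ? 1 : 0);
}

static int
example_count_rdonly(void)
{
        return (mountlist_scan(example_count_rdonly_cb, NULL,
                               MNTSCAN_FORWARD));
}
#endif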

/*
 * MOUNT RELATED VNODE FUNCTIONS
 */

static struct kproc_desc vnlru_kp = {
        "vnlru",
        vnlru_proc,
        &vnlruthread
};
SYSINIT(vnlru, SI_SUB_KTHREAD_UPDATE, SI_ORDER_FIRST, kproc_start, &vnlru_kp)

/*
 * Move a vnode from one mount queue to another.
 */
void
insmntque(struct vnode *vp, struct mount *mp)
{
        lwkt_tokref ilock;

        lwkt_gettoken(&ilock, &mntvnode_token);
        /*
         * Delete from old mount point vnode list, if on one.
         */
        if (vp->v_mount != NULL) {
                KASSERT(vp->v_mount->mnt_nvnodelistsize > 0,
                        ("bad mount point vnode list size"));
                vremovevnodemnt(vp);
                vp->v_mount->mnt_nvnodelistsize--;
        }
        /*
         * Insert into list of vnodes for the new mount point, if available.
         */
        if ((vp->v_mount = mp) == NULL) {
                lwkt_reltoken(&ilock);
                return;
        }
        TAILQ_INSERT_TAIL(&mp->mnt_nvnodelist, vp, v_nmntvnodes);
        mp->mnt_nvnodelistsize++;
        lwkt_reltoken(&ilock);
}

/*
 * Scan the vnodes under a mount point and issue appropriate callbacks.
 *
 * The fastfunc() callback is called with just the mountlist token held
 * (no vnode lock).  It may not block and the vnode may be undergoing
 * modifications while the caller is processing it.  The vnode will
 * not be entirely destroyed, however, due to the fact that the mountlist
 * token is held.  A return value < 0 skips to the next vnode without calling
 * the slowfunc(), a return value > 0 terminates the loop.
 *
 * The slowfunc() callback is called after the vnode has been successfully
 * locked based on passed flags.  The vnode is skipped if it gets rearranged
 * or destroyed while blocking on the lock.  A non-zero return value from
 * the slow function terminates the loop.  The slow function is allowed to
 * arbitrarily block.  The scanning code guarantees consistency of operation
 * even if the slow function deletes or moves the node, or blocks and some
 * other thread deletes or moves the node.
 */
int
vmntvnodescan(
    struct mount *mp,
    int flags,
    int (*fastfunc)(struct mount *mp, struct vnode *vp, void *data),
    int (*slowfunc)(struct mount *mp, struct vnode *vp, void *data),
    void *data
) {
        struct vmntvnodescan_info info;
        lwkt_tokref ilock;
        struct vnode *vp;
        int r = 0;
        int maxcount = 1000000;

        lwkt_gettoken(&ilock, &mntvnode_token);

        info.vp = TAILQ_FIRST(&mp->mnt_nvnodelist);
        TAILQ_INSERT_TAIL(&mntvnodescan_list, &info, entry);
        while ((vp = info.vp) != NULL) {
                if (--maxcount == 0)
                        panic("maxcount reached during vmntvnodescan");

                if (vp->v_type == VNON)         /* visible but not ready */
                        goto next;
                KKASSERT(vp->v_mount == mp);

                /*
                 * Quick test.  A negative return continues the loop without
                 * calling the slow test.  0 continues onto the slow test.
                 * A positive number aborts the loop.
                 */
                if (fastfunc) {
                        if ((r = fastfunc(mp, vp, data)) < 0)
                                goto next;
                        if (r)
                                break;
                }

                /*
                 * Get a vxlock on the vnode, retry if it has moved or isn't
                 * in the mountlist where we expect it.
                 */
                if (slowfunc) {
                        int error;

                        switch(flags) {
                        case VMSC_GETVP:
                                error = vget(vp, LK_EXCLUSIVE, curthread);
                                break;
                        case VMSC_GETVP|VMSC_NOWAIT:
                                error = vget(vp, LK_EXCLUSIVE|LK_NOWAIT,
                                             curthread);
                                break;
                        case VMSC_GETVX:
                                error = vx_get(vp);
                                break;
                        case VMSC_REFVP:
                                vref(vp);
                                /* fall through */
                        default:
                                error = 0;
                                break;
                        }
                        if (error)
                                goto next;

                        /*
                         * Do not call the slow function if the vnode is
                         * invalid or if it was ripped out from under us
                         * while we (potentially) blocked.
                         */
                        if (info.vp == vp && vp->v_type != VNON)
                                r = slowfunc(mp, vp, data);

                        /*
                         * Cleanup
                         */
                        switch(flags) {
                        case VMSC_GETVP:
                        case VMSC_GETVP|VMSC_NOWAIT:
                                vput(vp);
                                break;
                        case VMSC_GETVX:
                                vx_put(vp);
                                break;
                        case VMSC_REFVP:
                                vrele(vp);
                                /* fall through */
                        default:
                                break;
                        }
                        if (r != 0)
                                break;
                }

                /*
                 * Iterate.  If the vnode was ripped out from under us
                 * info.vp will already point to the next vnode, otherwise
                 * we have to obtain the next valid vnode ourselves.
                 */
next:
                if (info.vp == vp)
                        info.vp = TAILQ_NEXT(vp, v_nmntvnodes);
        }
        TAILQ_REMOVE(&mntvnodescan_list, &info, entry);
        lwkt_reltoken(&ilock);
        return(r);
}
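
/*
 * Illustrative sketch (not part of the original file): a fastfunc/slowfunc
 * pair for vmntvnodescan().  The fast function filters without blocking;
 * the slow function runs with the vnode VX locked.  The names below are
 * placeholders.
 */
#if 0   /* example only */
static int
example_fastfunc(struct mount *mp, struct vnode *vp, void *data)
{
        if (vp->v_type != VREG)
                return (-1);    /* skip; slowfunc is not called */
        return (0);             /* proceed to the slowfunc */
}

static int
example_slowfunc(struct mount *mp, struct vnode *vp, void *data)
{
        ++*(int *)data;         /* vp is VX locked; blocking is allowed */
        return (0);             /* non-zero would terminate the scan */
}

static int
example_count_regular(struct mount *mp)
{
        int count = 0;

        vmntvnodescan(mp, VMSC_GETVX, example_fastfunc, example_slowfunc,
                      &count);
        return (count);
}
#endif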

/*
 * Remove any vnodes in the vnode table belonging to mount point mp.
 *
 * If FORCECLOSE is not specified, there should not be any active ones,
 * return error if any are found (nb: this is a user error, not a
 * system error).  If FORCECLOSE is specified, detach any active vnodes
 * that are found.
 *
 * If WRITECLOSE is set, only flush out regular file vnodes open for
 * writing.
 *
 * SKIPSYSTEM causes any vnodes marked VSYSTEM to be skipped.
 *
 * `rootrefs' specifies the base reference count for the root vnode
 * of this filesystem.  The root vnode is considered busy if its
 * v_usecount exceeds this value.  On a successful return, vflush()
 * will call vrele() on the root vnode exactly rootrefs times.
 * If the SKIPSYSTEM or WRITECLOSE flags are specified, rootrefs must
 * be zero.
 */
#ifdef DIAGNOSTIC
static int busyprt = 0;                /* print out busy vnodes */
SYSCTL_INT(_debug, OID_AUTO, busyprt, CTLFLAG_RW, &busyprt, 0, "");
#endif

static int vflush_scan(struct mount *mp, struct vnode *vp, void *data);

struct vflush_info {
        int flags;
        int busy;
        thread_t td;
};

int
vflush(struct mount *mp, int rootrefs, int flags)
{
        struct thread *td = curthread;  /* XXX */
        struct vnode *rootvp = NULL;
        int error;
        struct vflush_info vflush_info;

        if (rootrefs > 0) {
                KASSERT((flags & (SKIPSYSTEM | WRITECLOSE)) == 0,
                        ("vflush: bad args"));
                /*
                 * Get the filesystem root vnode.  We can vput() it
                 * immediately, since with rootrefs > 0, it won't go away.
                 */
                if ((error = VFS_ROOT(mp, &rootvp)) != 0)
                        return (error);
                vput(rootvp);
        }

        vflush_info.busy = 0;
        vflush_info.flags = flags;
        vflush_info.td = td;
        vmntvnodescan(mp, VMSC_GETVX, NULL, vflush_scan, &vflush_info);

        if (rootrefs > 0 && (flags & FORCECLOSE) == 0) {
                /*
                 * If just the root vnode is busy, and if its refcount
                 * is equal to `rootrefs', then go ahead and kill it.
                 */
                KASSERT(vflush_info.busy > 0, ("vflush: not busy"));
                KASSERT(rootvp->v_usecount >= rootrefs, ("vflush: rootrefs"));
                if (vflush_info.busy == 1 && rootvp->v_usecount == rootrefs) {
                        if (vx_lock(rootvp) == 0) {
                                vgone(rootvp);
                                vx_unlock(rootvp);
                                vflush_info.busy = 0;
                        }
                }
        }
        if (vflush_info.busy)
                return (EBUSY);
        for (; rootrefs > 0; rootrefs--)
                vrele(rootvp);
        return (0);
}
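
/*
 * Illustrative sketch (not part of the original file): how a filesystem's
 * unmount path might call vflush(), passing FORCECLOSE for forced
 * unmounts.  The function name is a placeholder.
 */
#if 0   /* example only */
static int
example_unmount_flush(struct mount *mp, int mntflags)
{
        int flags = 0;

        if (mntflags & MNT_FORCE)
                flags |= FORCECLOSE;
        return (vflush(mp, 0, flags));  /* rootrefs == 0: no root ref held */
}
#endif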

/*
 * The scan callback is made with a VX locked vnode.
 */
static int
vflush_scan(struct mount *mp, struct vnode *vp, void *data)
{
        struct vflush_info *info = data;
        struct vattr vattr;

        /*
         * Skip over vnodes marked VSYSTEM.
         */
        if ((info->flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) {
                return(0);
        }

        /*
         * If WRITECLOSE is set, flush out unlinked but still open
         * files (even if open only for reading) and regular file
         * vnodes open for writing.
         */
        if ((info->flags & WRITECLOSE) &&
            (vp->v_type == VNON ||
            (VOP_GETATTR(vp, &vattr, info->td) == 0 &&
            vattr.va_nlink > 0)) &&
            (vp->v_writecount == 0 || vp->v_type != VREG)) {
                return(0);
        }

        /*
         * If the only reference is our own VX ref (v_usecount == 1), all
         * we need to do is clear out the vnode data structures and we
         * are done.
         */
        if (vp->v_usecount == 1) {
                vgone(vp);
                return(0);
        }

        /*
         * If FORCECLOSE is set, forcibly close the vnode.  For block
         * or character devices, revert to an anonymous device.  For
         * all other files, just kill them.
         */
        if (info->flags & FORCECLOSE) {
                if (vp->v_type != VBLK && vp->v_type != VCHR) {
                        vgone(vp);
                } else {
                        vclean(vp, 0, info->td);
                        vp->v_ops = &spec_vnode_vops;
                        insmntque(vp, NULL);
                }
                return(0);
        }
#ifdef DIAGNOSTIC
        if (busyprt)
                vprint("vflush: busy vnode", vp);
#endif
        ++info->busy;
        return(0);
}