kernel - Cleanup, add assertions in the vnode freeing path
sys/kern/vfs_lock.c (dragonfly.git)

/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_lock.c,v 1.30 2008/06/30 03:57:41 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

static void vnode_terminate(struct vnode *vp);
static boolean_t vnode_ctor(void *obj, void *private, int ocflags);
static void vnode_dtor(void *obj, void *private);

static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");
static struct sysref_class vnode_sysref_class = {
	.name =		"vnode",
	.mtype =	M_VNODE,
	.proto =	SYSREF_PROTO_VNODE,
	.offset =	offsetof(struct vnode, v_sysref),
	.objsize =	sizeof(struct vnode),
	.mag_capacity =	256,
	.flags =	SRC_MANAGEDINIT,
	.ctor =		vnode_ctor,
	.dtor =		vnode_dtor,
	.ops = {
		.terminate = (sysref_terminate_func_t)vnode_terminate,
		.lock = (sysref_terminate_func_t)vx_lock,
		.unlock = (sysref_terminate_func_t)vx_unlock
	}
};

/*
 * The vnode free list holds inactive vnodes.  Aged inactive vnodes
 * are inserted prior to the mid point, and otherwise inserted
 * at the tail.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;
static struct vnode	vnode_free_mid;
static struct spinlock	vfs_spin = SPINLOCK_INITIALIZER(vfs_spin);

int  freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD,
	&freevnodes, 0, "");
static int wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
	&wantfreevnodes, 0, "");
#ifdef TRACKVNODE
static ulong trackvnode;
SYSCTL_ULONG(_debug, OID_AUTO, trackvnode, CTLFLAG_RW,
	&trackvnode, 0, "");
#endif

/*
 * Called from vfsinit()
 */
void
vfs_lock_init(void)
{
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INSERT_HEAD(&vnode_free_list, &vnode_free_mid, v_freelist);
	spin_init(&vfs_spin);
	kmalloc_raise_limit(M_VNODE, 0);	/* unlimited */
}

/*
 * Misc functions
 */
static __inline
void
_vsetflags(struct vnode *vp, int flags)
{
	atomic_set_int(&vp->v_flag, flags);
}

static __inline
void
_vclrflags(struct vnode *vp, int flags)
{
	atomic_clear_int(&vp->v_flag, flags);
}

void
vsetflags(struct vnode *vp, int flags)
{
	_vsetflags(vp, flags);
}

void
vclrflags(struct vnode *vp, int flags)
{
	_vclrflags(vp, flags);
}

/*
 * Inline helper functions.
 *
 * WARNING: vbusy() may only be called while the vnode lock or VX lock
 *	    is held.  The vnode spinlock need not be held.
 *
 * MPSAFE
 */
static __inline
void
__vbusy_interlocked(struct vnode *vp)
{
	KKASSERT(vp->v_flag & VFREE);
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
	_vclrflags(vp, VFREE);
}

static __inline
void
__vbusy(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode)
		kprintf("__vbusy %p %08x\n", vp, vp->v_flag);
#endif
	spin_lock_wr(&vfs_spin);
	__vbusy_interlocked(vp);
	spin_unlock_wr(&vfs_spin);
}

/*
 * Put a vnode on the free list.  The caller has cleared VCACHED or owns the
 * implied sysref related to having removed the vnode from the freelist
 * (and VCACHED is already clear in that case).
 *
 * MPSAFE
 */
static __inline
void
__vfree(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode) {
		kprintf("__vfree %p %08x\n", vp, vp->v_flag);
		print_backtrace();
	}
#endif
	spin_lock_wr(&vfs_spin);
	KKASSERT((vp->v_flag & VFREE) == 0);
	if (vp->v_flag & VRECLAIMED)
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	else if (vp->v_flag & (VAGE0 | VAGE1))
		TAILQ_INSERT_BEFORE(&vnode_free_mid, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	_vsetflags(vp, VFREE);
	spin_unlock_wr(&vfs_spin);
}

/*
 * Put a vnode on the free list.  The caller has cleared VCACHED or owns the
 * implied sysref related to having removed the vnode from the freelist
 * (and VCACHED is already clear in that case).
 *
 * MPSAFE
 */
static __inline
void
__vfreetail(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode)
		kprintf("__vfreetail %p %08x\n", vp, vp->v_flag);
#endif
	spin_lock_wr(&vfs_spin);
	KKASSERT((vp->v_flag & VFREE) == 0);
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	_vsetflags(vp, VFREE);
	spin_unlock_wr(&vfs_spin);
}

/*
 * Return a C boolean if we should put the vnode on the freelist (VFREE),
 * or leave it / mark it as VCACHED.
 *
 * This routine is only valid if the vnode is already either VFREE or
 * VCACHED, or if it can become VFREE or VCACHED via vnode_terminate().
 *
 * WARNING! This function is typically called with v_spinlock held.
 *
 * MPSAFE
 */
static __inline boolean_t
vshouldfree(struct vnode *vp)
{
	return (vp->v_auxrefs == 0 &&
	    (vp->v_object == NULL || vp->v_object->resident_page_count == 0));
}

/*
 * Add a ref to an active vnode.  This function should never be called
 * with an inactive vnode (use vget() instead).
 *
 * MPSAFE
 */
void
vref(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt > 0 &&
		 (vp->v_flag & (VFREE|VINACTIVE)) == 0);
	sysref_get(&vp->v_sysref);
}

/*
 * Release a ref on an active or inactive vnode.  The sysref termination
 * function will be called when the last active reference is released,
 * and the vnode is returned to the objcache when the last inactive
 * reference is released.
 */
void
vrele(struct vnode *vp)
{
	sysref_put(&vp->v_sysref);
}

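/*
 * Example (illustrative sketch only, not compiled into the kernel):
 * bumping and dropping a reference on a vnode that the caller already
 * knows to be active, e.g. one previously returned by vget() and not
 * yet released.  The helper name below is hypothetical.
 */
#if 0
static void
example_vref_vrele(struct vnode *vp)
{
	/* vp must be active: vref() asserts VFREE and VINACTIVE are clear */
	vref(vp);
	/* ... hand vp to another subsystem or do work ... */
	vrele(vp);
}
#endif
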
/*
 * Add an auxiliary data structure reference to the vnode.  Auxiliary
 * references do not change the state of the vnode or prevent it
 * from being deactivated, reclaimed, or placed on or removed from
 * the free list.
 *
 * An auxiliary reference DOES prevent the vnode from being destroyed,
 * allowing you to vx_lock() it, test state, etc.
 *
 * An auxiliary reference DOES NOT move a vnode out of the VFREE state
 * once it has entered it.
 *
 * WARNING! vhold() and vhold_interlocked() must not acquire v_spinlock.
 *	    The spinlock may or may not already be held by the caller.
 *	    vdrop() will clean up the free list state.
 *
 * MPSAFE
 */
void
vhold(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0);
	atomic_add_int(&vp->v_auxrefs, 1);
}

void
vhold_interlocked(struct vnode *vp)
{
	atomic_add_int(&vp->v_auxrefs, 1);
}

/*
 * Remove an auxiliary reference from the vnode.
 *
 * vdrop() needs to check for a VCACHED->VFREE transition to catch cases
 * where a vnode is held past its reclamation.  We use v_spinlock to
 * interlock VCACHED -> !VCACHED transitions.
 *
 * MPSAFE
 */
void
vdrop(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0 && vp->v_auxrefs > 0);
	spin_lock_wr(&vp->v_spinlock);
	atomic_subtract_int(&vp->v_auxrefs, 1);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		_vclrflags(vp, VCACHED);
		__vfree(vp);
	}
	spin_unlock_wr(&vp->v_spinlock);
}

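/*
 * Example (illustrative sketch only, not compiled into the kernel):
 * an auxiliary data structure pinning a vnode.  vhold() keeps the vnode
 * from being destroyed but does not keep it active; a later vget() is
 * still required before using it.  Assumes vp is currently referenced,
 * since vhold() asserts a non-zero sysref count.  The helper name is
 * hypothetical.
 */
#if 0
static void
example_vhold_vdrop(struct vnode *vp)
{
	vhold(vp);	/* auxiliary ref; vp may still be reclaimed */
	/* ... record vp in an auxiliary structure, inspect state ... */
	vdrop(vp);	/* may move a VCACHED vnode onto the free list */
}
#endif
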
/*
 * This function is called when the last active reference on the vnode
 * is released, typically via vrele().  SYSREF will VX lock the vnode
 * and then give the vnode a negative ref count, indicating that it is
 * undergoing termination or is being set aside for the cache, and one
 * final sysref_put() is required to actually return it to the memory
 * subsystem.
 *
 * Additional inactive sysrefs may race us but that's ok.  Reactivations
 * cannot race us because the sysref code interlocks with the VX lock
 * (which is held on call).
 *
 * MPSAFE
 */
void
vnode_terminate(struct vnode *vp)
{
	/*
	 * We own the VX lock, it should not be possible for someone else
	 * to have reactivated the vp.
	 */
	KKASSERT(sysref_isinactive(&vp->v_sysref));

	/*
	 * Deactivate the vnode by marking it VFREE or VCACHED.
	 * The vnode can be reactivated from either state until
	 * reclaimed.  These states inherit the 'last' sysref on the
	 * vnode.
	 *
	 * NOTE: There may be additional inactive references from
	 *	 other entities blocking on the VX lock while we hold it,
	 *	 but this does not prevent us from changing the vnode's
	 *	 state.
	 *
	 * NOTE: The vnode could already be marked inactive.  XXX
	 *	 how?
	 *
	 * NOTE: v_mount may be NULL due to assignment to
	 *	 dead_vnode_vops
	 *
	 * NOTE: The vnode may be marked inactive with dirty buffers
	 *	 or dirty pages in its cached VM object still present.
	 *
	 * NOTE: VCACHED should not be set on entry.  We lose control
	 *	 of the sysref the instant the vnode is placed on the
	 *	 free list or when VCACHED is set.
	 *
	 *	 The VX lock is required when transitioning to
	 *	 +VCACHED but is not sufficient for the vshouldfree()
	 *	 interlocked test or when transitioning to -VCACHED.
	 */
	if ((vp->v_flag & VINACTIVE) == 0) {
		_vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
	}
	spin_lock_wr(&vp->v_spinlock);
	KKASSERT((vp->v_flag & (VFREE|VCACHED)) == 0);
	if (vshouldfree(vp))
		__vfree(vp);
	else
		_vsetflags(vp, VCACHED);	/* inactive but not yet free */
	spin_unlock_wr(&vp->v_spinlock);
	vx_unlock(vp);
}

/*
 * Physical vnode constructor / destructor.  These are only executed on
 * the backend of the objcache.  They are NOT executed on every vnode
 * allocation or deallocation.
 *
 * MPSAFE
 */
boolean_t
vnode_ctor(void *obj, void *private, int ocflags)
{
	struct vnode *vp = obj;

	lwkt_token_init(&vp->v_token);
	lockinit(&vp->v_lock, "vnode", 0, 0);
	ccms_dataspace_init(&vp->v_ccms);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	return(TRUE);
}

/*
 * MPSAFE
 */
void
vnode_dtor(void *obj, void *private)
{
	struct vnode *vp = obj;

	KKASSERT((vp->v_flag & (VCACHED|VFREE)) == 0);
	ccms_dataspace_destroy(&vp->v_ccms);
}

/****************************************************************
 *			VX LOCKING FUNCTIONS			*
 ****************************************************************
 *
 * These functions lock vnodes for reclamation and deactivation related
 * activities.  The caller must already be holding some sort of reference
 * on the vnode.
 *
 * MPSAFE
 */
void
vx_lock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

/*
 * The non-blocking version also uses a slightly different mechanic.
 * This function will explicitly fail not only if it cannot acquire
 * the lock normally, but also if the caller already holds a lock.
 *
 * The adjusted mechanic is used to close a loophole where complex
 * VOP_RECLAIM code can circle around recursively and allocate the
 * same vnode it is trying to destroy from the freelist.
 *
 * Any filesystem (aka UFS) which puts LK_CANRECURSE in lk_flags can
 * cause the incorrect behavior to occur.  If not for that lockmgr()
 * would do the right thing.
 */
static int
vx_lock_nonblock(struct vnode *vp)
{
	if (lockcountnb(&vp->v_lock))
		return(EBUSY);
	return(lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT | LK_NOSPINWAIT));
}

void
vx_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

/****************************************************************
 *		   VNODE ACQUISITION FUNCTIONS			*
 ****************************************************************
 *
 * These functions must be used when accessing a vnode via an auxiliary
 * reference such as the namecache or free list, or when you wish to
 * do a combo ref+lock sequence.
 *
 * These functions are MANDATORY for any code chain accessing a vnode
 * whose activation state is not known.
 *
 * vget() can be called with LK_NOWAIT and will return EBUSY if the
 * lock cannot be immediately acquired.
 *
 * vget()/vput() are used when reactivation is desired.
 *
 * vx_get() and vx_put() are used when reactivation is not desired.
 */
int
vget(struct vnode *vp, int flags)
{
	int error;

	/*
	 * A lock type must be passed
	 */
	if ((flags & LK_TYPE_MASK) == 0) {
		panic("vget() called with no lock specified!");
		/* NOT REACHED */
	}

	/*
	 * Reference the structure and then acquire the lock.  0->1
	 * transitions and refs during termination are allowed here so
	 * call sysref directly.
	 *
	 * NOTE: The requested lock might be a shared lock and does
	 *	 not protect our access to the refcnt or other fields.
	 */
	sysref_get(&vp->v_sysref);
	if ((error = vn_lock(vp, flags)) != 0) {
		/*
		 * The lock failed, undo and return an error.
		 */
		sysref_put(&vp->v_sysref);
	} else if (vp->v_flag & VRECLAIMED) {
		/*
		 * The node is being reclaimed and cannot be reactivated
		 * any more, undo and return ENOENT.
		 */
		vn_unlock(vp);
		vrele(vp);
		error = ENOENT;
	} else {
		/*
		 * If the vnode is marked VFREE or VCACHED it needs to be
		 * reactivated, otherwise it had better already be active.
		 * VINACTIVE must also be cleared.
		 *
		 * In the VFREE/VCACHED case we have to throw away the
		 * sysref that was earmarking those cases and preventing
		 * the vnode from being destroyed.  Our sysref is still held.
		 *
		 * We are allowed to reactivate the vnode while we hold
		 * the VX lock, assuming it can be reactivated.
		 */
		spin_lock_wr(&vp->v_spinlock);
		if (vp->v_flag & VFREE) {
			__vbusy(vp);
			sysref_activate(&vp->v_sysref);
			spin_unlock_wr(&vp->v_spinlock);
			sysref_put(&vp->v_sysref);
		} else if (vp->v_flag & VCACHED) {
			_vclrflags(vp, VCACHED);
			sysref_activate(&vp->v_sysref);
			spin_unlock_wr(&vp->v_spinlock);
			sysref_put(&vp->v_sysref);
		} else {
			if (sysref_isinactive(&vp->v_sysref)) {
				sysref_activate(&vp->v_sysref);
				kprintf("Warning vp %p reactivation race\n",
					vp);
			}
			spin_unlock_wr(&vp->v_spinlock);
		}
		_vclrflags(vp, VINACTIVE);
		error = 0;
	}
	return(error);
}

/*
 * MPSAFE
 */
void
vput(struct vnode *vp)
{
	vn_unlock(vp);
	vrele(vp);
}

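/*
 * Example (illustrative sketch only, not compiled into the kernel):
 * the ref+lock sequence used when a vnode is reached via an auxiliary
 * reference such as the namecache and its activation state is unknown.
 * The function name below is hypothetical.
 */
#if 0
static int
example_vget_vput(struct vnode *vp)
{
	int error;

	error = vget(vp, LK_EXCLUSIVE);
	if (error == 0) {
		/* vp is now active, referenced, and exclusively locked */
		/* ... issue VOP_*() calls ... */
		vput(vp);	/* unlock and release in one call */
	}
	return (error);
}
#endif
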
/*
 * XXX The vx_*() locks should use auxrefs, not the main reference counter.
 *
 * MPSAFE
 */
void
vx_get(struct vnode *vp)
{
	sysref_get(&vp->v_sysref);
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

/*
 * MPSAFE
 */
int
vx_get_nonblock(struct vnode *vp)
{
	int error;

	sysref_get(&vp->v_sysref);
	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
	if (error)
		sysref_put(&vp->v_sysref);
	return(error);
}

/*
 * Release a VX lock that also held a ref on the vnode.
 *
 * vx_put needs to check for a VCACHED->VFREE transition to catch the
 * case where e.g. vnlru issues a vgone*().
 *
 * MPSAFE
 */
void
vx_put(struct vnode *vp)
{
	spin_lock_wr(&vp->v_spinlock);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		_vclrflags(vp, VCACHED);
		__vfree(vp);
	}
	spin_unlock_wr(&vp->v_spinlock);
	lockmgr(&vp->v_lock, LK_RELEASE);
	sysref_put(&vp->v_sysref);
}

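/*
 * Example (illustrative sketch only, not compiled into the kernel):
 * using the VX interfaces when the caller wants to inspect or tear down
 * a vnode without reactivating it, in the style of a vnlru scan.  The
 * function name below is hypothetical.
 */
#if 0
static void
example_vx_get_put(struct vnode *vp)
{
	vx_get(vp);	/* ref + exclusive v_lock, no reactivation */
	/* ... examine state, possibly call vgone_vxlocked(vp) ... */
	vx_put(vp);	/* may transition VCACHED -> VFREE */
}
#endif
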
/*
 * Try to reuse a vnode from the free list.
 *
 * NOTE: The returned vnode is not completely initialized.
 *
 * WARNING: The freevnodes count can race, NULL can be returned even if
 *	    freevnodes != 0.
 *
 * MPSAFE
 */
static
struct vnode *
allocfreevnode(void)
{
	struct vnode *vp;
	int count;

	for (count = 0; count < freevnodes; count++) {
		/*
		 * Try to lock the first vnode on the free list.
		 * Cycle if we can't.
		 *
		 * We use a bad hack in vx_lock_nonblock() which avoids
		 * the lock order reversal between vfs_spin and v_spinlock.
		 * This is very fragile code and I don't want to use
		 * vhold here.
		 */
		spin_lock_wr(&vfs_spin);
		vp = TAILQ_FIRST(&vnode_free_list);
		if (vp == &vnode_free_mid)
			vp = TAILQ_NEXT(vp, v_freelist);
		if (vx_lock_nonblock(vp)) {
			KKASSERT(vp->v_flag & VFREE);
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_TAIL(&vnode_free_list,
					  vp, v_freelist);
			spin_unlock_wr(&vfs_spin);
			continue;
		}

		/*
		 * We inherit the sysref associated with the vnode on the
		 * free list.  Because VCACHED is clear the vnode will not
		 * be placed back on the free list.  We own the sysref
		 * free and clear and thus control the disposition of
		 * the vnode.
		 */
		__vbusy_interlocked(vp);
		spin_unlock_wr(&vfs_spin);
#ifdef TRACKVNODE
		if ((ulong)vp == trackvnode)
			kprintf("allocfreevnode %p %08x\n", vp, vp->v_flag);
#endif
		/*
		 * Do not reclaim/reuse a vnode while auxiliary refs exist.
		 * This includes namecache refs due to a related ncp being
		 * locked or having children.
		 *
		 * We will make this test several times as auxrefs can
		 * get incremented on us without any spinlocks being held
		 * until we have removed all namecache and inode references
		 * to the vnode.
		 *
		 * Because VCACHED is already in the correct state (cleared)
		 * we cannot race other vdrop()s occurring at the same time
		 * and can safely place vp on the free list.
		 *
		 * The free list association reinherits the sysref.
		 */
		if (vp->v_auxrefs) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}

		/*
		 * We inherit the reference that was previously associated
		 * with the vnode being on the free list.  VCACHED had better
		 * not be set because the reference and VX lock prevent
		 * the sysref from transitioning to an active state.
		 */
		KKASSERT((vp->v_flag & (VINACTIVE|VCACHED)) == VINACTIVE);
		KKASSERT(sysref_isinactive(&vp->v_sysref));

		/*
		 * Holding the VX lock on an inactive vnode prevents it
		 * from being reactivated or reused.  New namecache
		 * associations can only be made using active vnodes.
		 *
		 * Another thread may be blocked on our vnode lock while
		 * holding a namecache lock.  We can only reuse this vnode
		 * if we can clear all namecache associations without
		 * blocking.
		 *
		 * Because VCACHED is already in the correct state (cleared)
		 * we cannot race other vdrop()s occurring at the same time
		 * and can safely place vp on the free list.
		 */
		if ((vp->v_flag & VRECLAIMED) == 0) {
			if (cache_inval_vp_nonblock(vp)) {
				__vfreetail(vp);
				vx_unlock(vp);
				continue;
			}
			vgone_vxlocked(vp);
			/* vnode is still VX locked */
		}

		/*
		 * We can reuse the vnode if no primary or auxiliary
		 * references remain other than ours, else put it
		 * back on the free list and keep looking.
		 *
		 * Either the free list inherits the last reference
		 * or we fall through and sysref_activate() the last
		 * reference.
		 *
		 * Since the vnode is in a VRECLAIMED state, no new
		 * namecache associations could have been made.
		 */
		KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
		if (vp->v_auxrefs ||
		    !sysref_islastdeactivation(&vp->v_sysref)) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}

		/*
		 * Return a VX locked vnode suitable for reuse.  The caller
		 * inherits the sysref.
		 */
		return(vp);
	}
	return(NULL);
}

/*
 * Obtain a new vnode from the freelist, allocating more if necessary.
 * The returned vnode is VX locked & refd.
 *
 * All new vnodes set the VAGE flags.  An open() of the vnode will
 * decrement the (2-bit) flags.  Vnodes which are opened several times
 * are thus retained in the cache over vnodes which are merely stat()d.
 *
 * MPSAFE
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;

	/*
	 * Try to reuse vnodes if we hit the max.  This situation only
	 * occurs in certain large-memory (2G+) situations.  We cannot
	 * attempt to directly reclaim vnodes due to nasty recursion
	 * problems.
	 */
	while (numvnodes - freevnodes > desiredvnodes)
		vnlru_proc_wait();

	/*
	 * Try to build up as many vnodes as we can before reallocating
	 * from the free list.  A vnode on the free list simply means
	 * that it is inactive with no resident pages.  It may or may not
	 * have been reclaimed and could have valuable information associated
	 * with it that we shouldn't throw away unless we really need to.
	 *
	 * HAMMER NOTE: Re-establishing a vnode is a fairly expensive
	 * operation for HAMMER but this should benefit UFS as well.
	 */
	if (freevnodes >= wantfreevnodes && numvnodes >= desiredvnodes)
		vp = allocfreevnode();
	else
		vp = NULL;
	if (vp == NULL) {
		vp = sysref_alloc(&vnode_sysref_class);
		KKASSERT((vp->v_flag & (VCACHED|VFREE)) == 0);
		lockmgr(&vp->v_lock, LK_EXCLUSIVE);
		numvnodes++;
	}

	/*
	 * We are using a managed sysref class, vnode fields are only
	 * zeroed on initial allocation from the backing store, not
	 * on reallocation.  Thus we have to clear these fields for both
	 * reallocation and reuse.
	 */
#ifdef INVARIANTS
	if (vp->v_data)
		panic("cleaned vnode isn't");
	if (bio_track_active(&vp->v_track_read) ||
	    bio_track_active(&vp->v_track_write)) {
		panic("Clean vnode has pending I/O's");
	}
	if (vp->v_flag & VONWORKLST)
		panic("Clean vnode still pending on syncer worklist!");
	if (!RB_EMPTY(&vp->v_rbdirty_tree))
		panic("Clean vnode still has dirty buffers!");
	if (!RB_EMPTY(&vp->v_rbclean_tree))
		panic("Clean vnode still has clean buffers!");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("Clean vnode still on hash tree!");
	KKASSERT(vp->v_mount == NULL);
#endif
	vp->v_flag = VAGE0 | VAGE1;
	vp->v_lastw = 0;
	vp->v_lasta = 0;
	vp->v_cstart = 0;
	vp->v_clen = 0;
	vp->v_socket = 0;
	vp->v_opencount = 0;
	vp->v_writecount = 0;	/* XXX */

	/*
	 * lktimeout only applies when LK_TIMELOCK is used, and only
	 * the pageout daemon uses it.  The timeout may not be zero
	 * or the pageout daemon can deadlock in low-VM situations.
	 */
	if (lktimeout == 0)
		lktimeout = hz / 10;
	lockreinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
	/* exclusive lock still held */

	/*
	 * Note: sysref needs to be activated to convert -0x40000000 to +1.
	 * The -0x40000000 comes from the last ref on reuse, and from
	 * sysref_init() on allocate.
	 */
	sysref_activate(&vp->v_sysref);
	vp->v_filesize = NOOFFSET;
	vp->v_type = VNON;
	vp->v_tag = 0;
	vp->v_ops = NULL;
	vp->v_data = NULL;
	KKASSERT(vp->v_mount == NULL);

	return (vp);
}

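/*
 * Example (illustrative sketch only, not compiled into the kernel):
 * how a consumer might obtain and then hand back a fresh vnode.  Real
 * filesystems normally go through getnewvnode(), which wraps
 * allocvnode(); the function name and the field assignments below are
 * hypothetical.
 */
#if 0
static struct vnode *
example_allocvnode(void)
{
	struct vnode *vp;

	vp = allocvnode(0, 0);	/* returned VX locked and referenced */
	vp->v_type = VREG;
	/* ... set v_ops / v_data, associate with a mount ... */
	return (vp);		/* caller still holds the VX lock and ref */
}
#endif
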
/*
 * MPSAFE
 */
int
freesomevnodes(int n)
{
	struct vnode *vp;
	int count = 0;

	while (n) {
		--n;
		if ((vp = allocfreevnode()) == NULL)
			break;
		vx_put(vp);
		--numvnodes;
		++count;	/* count the vnodes actually freed */
	}
	return(count);
}