/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_lock.c,v 1.30 2008/06/30 03:57:41 dillon Exp $
 */
/*
 * External virtual filesystem routines
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

#include <sys/thread2.h>
#include <sys/sysref2.h>
static void vnode_terminate(struct vnode *vp);
static boolean_t vnode_ctor(void *obj, void *private, int ocflags);
static void vnode_dtor(void *obj, void *private);

static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");
static struct sysref_class vnode_sysref_class = {
	.name =		"vnode",
	.mtype =	M_VNODE,
	.proto =	SYSREF_PROTO_VNODE,
	.offset =	offsetof(struct vnode, v_sysref),
	.objsize =	sizeof(struct vnode),
	.mag_capacity =	256,
	.flags =	SRC_MANAGEDINIT,
	.ctor =		vnode_ctor,
	.dtor =		vnode_dtor,
	.ops = {
		.terminate = (sysref_terminate_func_t)vnode_terminate,
		.lock = (sysref_terminate_func_t)vx_lock,
		.unlock = (sysref_terminate_func_t)vx_unlock
	}
};
/*
 * The vnode free list holds inactive vnodes.  Aged inactive vnodes
 * are inserted prior to the mid point, and otherwise inserted
 * after the mid point.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;
static struct vnode	vnode_free_mid1;
static struct vnode	vnode_free_mid2;
static struct vnode	vnode_free_rover;
static struct spinlock	vfs_spin = SPINLOCK_INITIALIZER(vfs_spin);
static enum { ROVER_MID1, ROVER_MID2 } rover_state = ROVER_MID2;
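
/*
 * Rough picture of the list layout implied by __vfree() and the rover
 * below (an illustrative sketch, not authoritative): the two mid-point
 * placeholders divide the free list into three sections ordered by how
 * cheap the vnode is to reuse:
 *
 *	HEAD -> [sec 1: reclaimed / no cached data] -> mid1 ->
 *		[sec 2: swap-backed data only]      -> mid2 ->
 *		[sec 3: resident VM pages]          -> TAIL
 *
 * allocfreevnode() consumes from the head, so the vnodes whose cached
 * data is least valuable are reused first, and the rover demotes
 * entries as their cache status decays.
 */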
int freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD,
	   &freevnodes, 0, "");
static int wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
	   &wantfreevnodes, 0, "");
#ifdef TRACKVNODE
static ulong trackvnode;
SYSCTL_ULONG(_debug, OID_AUTO, trackvnode, CTLFLAG_RW,
	     &trackvnode, 0, "");
#endif
/*
 * Called from vfsinit()
 */
void
vfs_lock_init(void)
{
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INSERT_TAIL(&vnode_free_list, &vnode_free_mid1, v_freelist);
	TAILQ_INSERT_TAIL(&vnode_free_list, &vnode_free_mid2, v_freelist);
	TAILQ_INSERT_TAIL(&vnode_free_list, &vnode_free_rover, v_freelist);
	spin_init(&vfs_spin);
	kmalloc_raise_limit(M_VNODE, 0);	/* unlimited */
}
static __inline
void
_vsetflags(struct vnode *vp, int flags)
{
	atomic_set_int(&vp->v_flag, flags);
}

static __inline
void
_vclrflags(struct vnode *vp, int flags)
{
	atomic_clear_int(&vp->v_flag, flags);
}

void
vsetflags(struct vnode *vp, int flags)
{
	_vsetflags(vp, flags);
}

void
vclrflags(struct vnode *vp, int flags)
{
	_vclrflags(vp, flags);
}
/*
 * Inline helper functions.
 *
 * WARNING: vbusy() may only be called while the vnode lock or VX lock
 *	    is held.  The vnode spinlock need not be held.
 *
 * MPSAFE
 */
static __inline
void
__vbusy_interlocked(struct vnode *vp)
{
	KKASSERT(vp->v_flag & VFREE);
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
	_vclrflags(vp, VFREE);
}

static __inline
void
__vbusy(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode)
		kprintf("__vbusy %p %08x\n", vp, vp->v_flag);
#endif
	spin_lock_wr(&vfs_spin);
	__vbusy_interlocked(vp);
	spin_unlock_wr(&vfs_spin);
}
/*
 * Put a vnode on the free list.  The caller has cleared VCACHED or owns the
 * implied sysref related to having removed the vnode from the freelist
 * (and VCACHED is already clear in that case).
 *
 * MPSAFE
 */
static void
__vfree(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode) {
		kprintf("__vfree %p %08x\n", vp, vp->v_flag);
		print_backtrace();
	}
#endif
	spin_lock_wr(&vfs_spin);
	KKASSERT((vp->v_flag & VFREE) == 0);

	/*
	 * Distinguish between basically dead vnodes, vnodes with cached
	 * data, and vnodes without cached data.  A rover will shift the
	 * vnodes around as their cache status is lost.
	 */
	if (vp->v_flag & VRECLAIMED) {
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	} else if (vp->v_object && vp->v_object->resident_page_count) {
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	} else if (vp->v_object && vp->v_object->swblock_count) {
		TAILQ_INSERT_BEFORE(&vnode_free_mid2, vp, v_freelist);
	} else {
		TAILQ_INSERT_BEFORE(&vnode_free_mid1, vp, v_freelist);
	}
	freevnodes++;
	_vsetflags(vp, VFREE);
	spin_unlock_wr(&vfs_spin);
}
/*
 * Put a vnode on the tail of the free list.  The caller has cleared
 * VCACHED or owns the implied sysref related to having removed the vnode
 * from the freelist (and VCACHED is already clear in that case).
 *
 * MPSAFE
 */
static void
__vfreetail(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode)
		kprintf("__vfreetail %p %08x\n", vp, vp->v_flag);
#endif
	spin_lock_wr(&vfs_spin);
	KKASSERT((vp->v_flag & VFREE) == 0);
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	_vsetflags(vp, VFREE);
	spin_unlock_wr(&vfs_spin);
}
/*
 * Return TRUE if we should put the vnode on the freelist (VFREE),
 * or FALSE to leave it alone / mark it as VCACHED.
 *
 * This routine is only valid if the vnode is already either VFREE or
 * VCACHED, or if it can become VFREE or VCACHED via vnode_terminate().
 *
 * WARNING!  This function is typically called with v_spinlock held.
 *
 * MPSAFE
 */
static __inline boolean_t
vshouldfree(struct vnode *vp)
{
	return (vp->v_auxrefs == 0 &&
	    (vp->v_object == NULL || vp->v_object->resident_page_count == 0));
}
/*
 * Add a ref to an active vnode.  This function should never be called
 * with an inactive vnode (use vget() instead).
 *
 * MPSAFE
 */
void
vref(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt > 0 &&
		 (vp->v_flag & (VFREE|VINACTIVE)) == 0);
	sysref_get(&vp->v_sysref);
}
/*
 * Release a ref on an active or inactive vnode.  The sysref termination
 * function will be called when the last active reference is released,
 * and the vnode is returned to the objcache when the last inactive
 * reference is released.
 *
 * MPSAFE
 */
void
vrele(struct vnode *vp)
{
	sysref_put(&vp->v_sysref);
}
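
/*
 * Illustrative usage sketch: a caller holding an active vnode (e.g. one
 * returned by vget()) takes an extra reference with vref() and pairs it
 * with vrele():
 *
 *	vref(vp);	// vp known active: refcnt > 0, !VFREE, !VINACTIVE
 *	...use vp...
 *	vrele(vp);	// may invoke vnode_terminate() on last active ref
 */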
/*
 * Add an auxiliary data structure reference to the vnode.  Auxiliary
 * references do not change the state of the vnode or prevent it
 * from being deactivated, reclaimed, or placed on or removed from
 * the free list.
 *
 * An auxiliary reference DOES prevent the vnode from being destroyed,
 * allowing you to vx_lock() it, test state, etc.
 *
 * An auxiliary reference DOES NOT move a vnode out of the VFREE state
 * once it has entered it.
 *
 * WARNING!  vhold() and vhold_interlocked() must not acquire v_spinlock.
 *	     The spinlock may or may not already be held by the caller.
 *	     vdrop() will clean up the free list state.
 *
 * MPSAFE
 */
void
vhold(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0);
	atomic_add_int(&vp->v_auxrefs, 1);
}

void
vhold_interlocked(struct vnode *vp)
{
	atomic_add_int(&vp->v_auxrefs, 1);
}
/*
 * Remove an auxiliary reference from the vnode.
 *
 * vdrop needs to check for a VCACHED->VFREE transition to catch cases
 * where a vnode is held past its reclamation.  We use v_spinlock to
 * interlock VCACHED -> !VCACHED transitions.
 *
 * MPSAFE
 */
void
vdrop(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0 && vp->v_auxrefs > 0);
	spin_lock_wr(&vp->v_spinlock);
	atomic_subtract_int(&vp->v_auxrefs, 1);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		_vclrflags(vp, VCACHED);
		__vfree(vp);
	}
	spin_unlock_wr(&vp->v_spinlock);
}
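
/*
 * Illustrative usage sketch: auxiliary references are what structures
 * such as the namecache use to pin a vnode's memory without keeping it
 * active:
 *
 *	vhold(vp);	// vp cannot be destroyed, but may still be reclaimed
 *	...safe to vx_lock(vp) and inspect state...
 *	vdrop(vp);	// may complete a deferred VCACHED->VFREE transition
 */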
/*
 * This function is called when the last active reference on the vnode
 * is released, typically via vrele().  SYSREF will VX lock the vnode
 * and then give the vnode a negative ref count, indicating that it is
 * undergoing termination or is being set aside for the cache, and one
 * final sysref_put() is required to actually return it to the memory
 * subsystem.
 *
 * Additional inactive sysrefs may race us but that's ok.  Reactivations
 * cannot race us because the sysref code interlocks with the VX lock
 * (which is held on call).
 *
 * MPSAFE
 */
static void
vnode_terminate(struct vnode *vp)
{
	/*
	 * We own the VX lock, it should not be possible for someone else
	 * to have reactivated the vp.
	 */
	KKASSERT(sysref_isinactive(&vp->v_sysref));

	/*
	 * Deactivate the vnode by marking it VFREE or VCACHED.
	 * The vnode can be reactivated from either state until
	 * reclaimed.  These states inherit the 'last' sysref on the
	 * vnode.
	 *
	 * NOTE: There may be additional inactive references from
	 *	 other entities blocking on the VX lock while we hold it,
	 *	 but this does not prevent us from changing the vnode's
	 *	 state.
	 *
	 * NOTE: The vnode could already be marked inactive.  XXX
	 *	 how?
	 *
	 * NOTE: v_mount may be NULL due to assignment to
	 *	 dead_vnode_vops.
	 *
	 * NOTE: The vnode may be marked inactive with dirty buffers
	 *	 or dirty pages in its cached VM object still present.
	 *
	 * NOTE: VCACHED should not be set on entry.  We lose control
	 *	 of the sysref the instant the vnode is placed on the
	 *	 free list or when VCACHED is set.
	 *
	 *	 The VX lock is required when transitioning to
	 *	 +VCACHED but is not sufficient for the vshouldfree()
	 *	 interlocked test or when transitioning to -VCACHED.
	 */
	if ((vp->v_flag & VINACTIVE) == 0) {
		_vsetflags(vp, VINACTIVE);
		if (vp->v_mount)
			VOP_INACTIVE(vp);
	}
	spin_lock_wr(&vp->v_spinlock);
	KKASSERT((vp->v_flag & (VFREE|VCACHED)) == 0);
	if (vshouldfree(vp))
		__vfree(vp);
	else
		_vsetflags(vp, VCACHED);	/* inactive but not yet free */
	spin_unlock_wr(&vp->v_spinlock);
	vx_unlock(vp);
}
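
/*
 * Summary of the deactivation states managed above (a sketch derived
 * from the code in this file, not from the original comments):
 *
 *	active --(last vrele)--> vnode_terminate():
 *	    VINACTIVE is set, then the vnode becomes either VFREE (placed
 *	    on the free list) or VCACHED (inactive but held off the free
 *	    list by auxrefs or cached pages).  vget() can reactivate from
 *	    either state; vdrop()/vx_put() can convert VCACHED to VFREE
 *	    once vshouldfree() is satisfied.
 */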
/*
 * Physical vnode constructor / destructor.  These are only executed on
 * the backend of the objcache.  They are NOT executed on every vnode
 * allocation or deallocation.
 *
 * MPSAFE
 */
static boolean_t
vnode_ctor(void *obj, void *private, int ocflags)
{
	struct vnode *vp = obj;

	lwkt_token_init(&vp->v_token);
	lockinit(&vp->v_lock, "vnode", 0, 0);
	ccms_dataspace_init(&vp->v_ccms);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	return(TRUE);
}

/*
 * MPSAFE
 */
static void
vnode_dtor(void *obj, void *private)
{
	struct vnode *vp = obj;

	KKASSERT((vp->v_flag & (VCACHED|VFREE)) == 0);
	ccms_dataspace_destroy(&vp->v_ccms);
}
/****************************************************************
 *			VX LOCKING FUNCTIONS			*
 ****************************************************************
 *
 * These functions lock vnodes for reclamation and deactivation related
 * activities.  The caller must already be holding some sort of reference
 * on the vnode.
 *
 * MPSAFE
 */
void
vx_lock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

/*
 * The non-blocking version also uses a slightly different mechanic.
 * This function will explicitly fail not only if it cannot acquire
 * the lock normally, but also if the caller already holds a lock.
 *
 * The adjusted mechanic is used to close a loophole where complex
 * VOP_RECLAIM code can circle around recursively and allocate the
 * same vnode it is trying to destroy from the freelist.
 *
 * Any filesystem (aka UFS) which puts LK_CANRECURSE in lk_flags can
 * cause the incorrect behavior to occur.  If not for that, lockmgr()
 * would do the right thing.
 */
static int
vx_lock_nonblock(struct vnode *vp)
{
	if (lockcountnb(&vp->v_lock))
		return(EBUSY);
	return(lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT | LK_NOSPINWAIT));
}

void
vx_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}
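
/*
 * Illustrative usage sketch: VX locks are for lifecycle work, not for
 * normal filesystem access.  A typical pattern over an auxiliary
 * reference:
 *
 *	vhold(vp);	// pin the vnode's memory
 *	vx_lock(vp);	// exclusive lock, does not reactivate the vnode
 *	...test state / perform deactivation or reclamation work...
 *	vx_unlock(vp);
 *	vdrop(vp);
 */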
/****************************************************************
 *			VNODE ACQUISITION FUNCTIONS		*
 ****************************************************************
 *
 * These functions must be used when accessing a vnode via an auxiliary
 * reference such as the namecache or free list, or when you wish to
 * do a combo ref+lock sequence.
 *
 * These functions are MANDATORY for any code chain accessing a vnode
 * whose activation state is not known.
 *
 * vget() can be called with LK_NOWAIT and will return EBUSY if the
 * lock cannot be immediately acquired.
 *
 * vget()/vput() are used when reactivation is desired.
 *
 * vx_get() and vx_put() are used when reactivation is not desired.
 */
int
vget(struct vnode *vp, int flags)
{
	int error;

	/*
	 * A lock type must be passed
	 */
	if ((flags & LK_TYPE_MASK) == 0) {
		panic("vget() called with no lock specified!");
		/* NOT REACHED */
	}

	/*
	 * Reference the structure and then acquire the lock.  0->1
	 * transitions and refs during termination are allowed here so
	 * call sysref directly.
	 *
	 * NOTE: The requested lock might be a shared lock and does
	 *	 not protect our access to the refcnt or other fields.
	 */
	sysref_get(&vp->v_sysref);
	if ((error = vn_lock(vp, flags)) != 0) {
		/*
		 * The lock failed, undo and return an error.
		 */
		sysref_put(&vp->v_sysref);
	} else if (vp->v_flag & VRECLAIMED) {
		/*
		 * The node is being reclaimed and cannot be reactivated
		 * any more, undo and return ENOENT.
		 */
		vn_unlock(vp);
		vrele(vp);
		error = ENOENT;
	} else {
		/*
		 * If the vnode is marked VFREE or VCACHED it needs to be
		 * reactivated, otherwise it had better already be active.
		 * VINACTIVE must also be cleared.
		 *
		 * In the VFREE/VCACHED case we have to throw away the
		 * sysref that was earmarking those cases and preventing
		 * the vnode from being destroyed.  Our sysref is still held.
		 *
		 * We are allowed to reactivate the vnode while we hold
		 * the VX lock, assuming it can be reactivated.
		 */
		spin_lock_wr(&vp->v_spinlock);
		if (vp->v_flag & VFREE) {
			__vbusy(vp);
			sysref_activate(&vp->v_sysref);
			spin_unlock_wr(&vp->v_spinlock);
			sysref_put(&vp->v_sysref);
		} else if (vp->v_flag & VCACHED) {
			_vclrflags(vp, VCACHED);
			sysref_activate(&vp->v_sysref);
			spin_unlock_wr(&vp->v_spinlock);
			sysref_put(&vp->v_sysref);
		} else {
			if (sysref_isinactive(&vp->v_sysref)) {
				sysref_activate(&vp->v_sysref);
				kprintf("Warning vp %p reactivation race\n",
					vp);
			}
			spin_unlock_wr(&vp->v_spinlock);
		}
		_vclrflags(vp, VINACTIVE);
		error = 0;
	}
	return(error);
}

/*
 * MPSAFE
 */
void
vput(struct vnode *vp)
{
	vn_unlock(vp);
	vrele(vp);
}
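
/*
 * Illustrative usage sketch: resolving a vnode whose activation state is
 * unknown, e.g. one found via an auxiliary reference:
 *
 *	if (vget(vp, LK_SHARED) == 0) {
 *		...vnode is active, referenced, and locked...
 *		vput(vp);	// unlock + vrele in one call
 *	}
 */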
/*
 * XXX The vx_*() locks should use auxrefs, not the main reference counter.
 *
 * MPSAFE
 */
void
vx_get(struct vnode *vp)
{
	sysref_get(&vp->v_sysref);
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

/*
 * MPSAFE
 */
int
vx_get_nonblock(struct vnode *vp)
{
	int error;

	sysref_get(&vp->v_sysref);
	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
	if (error)
		sysref_put(&vp->v_sysref);
	return(error);
}
/*
 * Release a VX lock that also held a ref on the vnode.
 *
 * vx_put needs to check for a VCACHED->VFREE transition to catch the
 * case where e.g. vnlru issues a vgone*().
 *
 * MPSAFE
 */
void
vx_put(struct vnode *vp)
{
	spin_lock_wr(&vp->v_spinlock);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		_vclrflags(vp, VCACHED);
		__vfree(vp);
	}
	spin_unlock_wr(&vp->v_spinlock);
	lockmgr(&vp->v_lock, LK_RELEASE);
	sysref_put(&vp->v_sysref);
}
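
/*
 * Illustrative usage sketch: vx_get()/vx_put() ref+lock a vnode without
 * reactivating it, which is what reclamation paths want:
 *
 *	vx_get(vp);	// ref + exclusive VX lock, activation state unchanged
 *	...deactivation/reclamation work...
 *	vx_put(vp);	// unlock + drop ref, may move vp to VFREE
 */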
/*
 * The rover looks for vnodes past the midline with no cached data and
 * moves them to before the midline.  If we do not do this the midline
 * can wind up in a degenerate state.
 */
static void
vnode_rover_locked(void)
{
	struct vnode *vp;

	/*
	 * Get the vnode after the rover.  The rover roves between mid1 and
	 * the end so the only special vnode it can encounter is mid2.
	 */
	vp = TAILQ_NEXT(&vnode_free_rover, v_freelist);
	if (vp == &vnode_free_mid2) {
		vp = TAILQ_NEXT(vp, v_freelist);
		rover_state = ROVER_MID2;
	}
	KKASSERT(vp != &vnode_free_mid1);

	/*
	 * Start over if we finished the scan.
	 */
	TAILQ_REMOVE(&vnode_free_list, &vnode_free_rover, v_freelist);
	if (vp == NULL) {
		TAILQ_INSERT_AFTER(&vnode_free_list, &vnode_free_mid1,
				   &vnode_free_rover, v_freelist);
		rover_state = ROVER_MID1;
		return;
	}
	TAILQ_INSERT_AFTER(&vnode_free_list, vp, &vnode_free_rover, v_freelist);

	/*
	 * Shift vp if appropriate.
	 */
	if (vp->v_object && vp->v_object->resident_page_count) {
		/*
		 * Promote vnode with resident pages to section 3.
		 * (This case shouldn't happen).
		 */
		if (rover_state == ROVER_MID1) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
		}
	} else if (vp->v_object && vp->v_object->swblock_count) {
		/*
		 * Demote vnode with only swap pages to section 2.
		 */
		if (rover_state == ROVER_MID2) {
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_BEFORE(&vnode_free_mid2, vp, v_freelist);
		}
	} else {
		/*
		 * Demote vnode with no cached data to section 1.
		 */
		TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
		TAILQ_INSERT_BEFORE(&vnode_free_mid1, vp, v_freelist);
	}
}
/*
 * Try to reuse a vnode from the free list.
 *
 * NOTE: The returned vnode is not completely initialized.
 *
 * WARNING: The freevnodes count can race, NULL can be returned even if
 *	    freevnodes != 0.
 *
 * MPSAFE
 */
static
struct vnode *
allocfreevnode(void)
{
	struct vnode *vp;
	int count;

	for (count = 0; count < freevnodes; count++) {
		/*
		 * Try to lock the first vnode on the free list.
		 * Cycle if we can't.
		 *
		 * We use a bad hack in vx_lock_nonblock() which avoids
		 * the lock order reversal between vfs_spin and v_spinlock.
		 * This is very fragile code and I don't want to use
		 * vhold here.
		 */
		spin_lock_wr(&vfs_spin);
		vnode_rover_locked();
		vnode_rover_locked();
		vp = TAILQ_FIRST(&vnode_free_list);
		while (vp == &vnode_free_mid1 || vp == &vnode_free_mid2 ||
		       vp == &vnode_free_rover) {
			vp = TAILQ_NEXT(vp, v_freelist);
		}
		if (vp == NULL) {
			spin_unlock_wr(&vfs_spin);
			break;
		}
		if (vx_lock_nonblock(vp)) {
			KKASSERT(vp->v_flag & VFREE);
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_TAIL(&vnode_free_list,
					  vp, v_freelist);
			spin_unlock_wr(&vfs_spin);
			continue;
		}

		/*
		 * We inherit the sysref associated with the vnode on the
		 * free list.  Because VCACHED is clear the vnode will not
		 * be placed back on the free list.  We own the sysref
		 * free and clear and thus control the disposition of
		 * the vnode.
		 */
		__vbusy_interlocked(vp);
		spin_unlock_wr(&vfs_spin);
#ifdef TRACKVNODE
		if ((ulong)vp == trackvnode)
			kprintf("allocfreevnode %p %08x\n", vp, vp->v_flag);
#endif
		/*
		 * Do not reclaim/reuse a vnode while auxiliary refs exist.
		 * This includes namecache refs due to a related ncp being
		 * locked or having children.
		 *
		 * We will make this test several times as auxrefs can
		 * get incremented on us without any spinlocks being held
		 * until we have removed all namecache and inode references
		 * to the vnode.
		 *
		 * Because VCACHED is already in the correct state (cleared)
		 * we cannot race other vdrop()s occurring at the same time
		 * and can safely place vp on the free list.
		 *
		 * The free list association reinherits the sysref.
		 */
		if (vp->v_auxrefs) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}

		/*
		 * We inherit the reference that was previously associated
		 * with the vnode being on the free list.  VCACHED had better
		 * not be set because the reference and VX lock prevents
		 * the sysref from transitioning to an active state.
		 */
		KKASSERT((vp->v_flag & (VINACTIVE|VCACHED)) == VINACTIVE);
		KKASSERT(sysref_isinactive(&vp->v_sysref));

		/*
		 * Holding the VX lock on an inactive vnode prevents it
		 * from being reactivated or reused.  New namecache
		 * associations can only be made using active vnodes.
		 *
		 * Another thread may be blocked on our vnode lock while
		 * holding a namecache lock.  We can only reuse this vnode
		 * if we can clear all namecache associations without
		 * blocking.
		 *
		 * Because VCACHED is already in the correct state (cleared)
		 * we cannot race other vdrop()s occurring at the same time
		 * and can safely place vp on the free list.
		 */
		if ((vp->v_flag & VRECLAIMED) == 0) {
			if (cache_inval_vp_nonblock(vp)) {
				__vfreetail(vp);
				vx_unlock(vp);
				continue;
			}
			vgone_vxlocked(vp);
			/* vnode is still VX locked */
		}

		/*
		 * We can reuse the vnode if no primary or auxiliary
		 * references remain other than ours, else put it
		 * back on the free list and keep looking.
		 *
		 * Either the free list inherits the last reference
		 * or we fall through and sysref_activate() the last
		 * reference.
		 *
		 * Since the vnode is in a VRECLAIMED state, no new
		 * namecache associations could have been made.
		 */
		KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
		if (vp->v_auxrefs ||
		    !sysref_islastdeactivation(&vp->v_sysref)) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}

		/*
		 * Return a VX locked vnode suitable for reuse.  The caller
		 * inherits the sysref.
		 */
		return(vp);
	}
	return(NULL);
}
/*
 * Obtain a new vnode from the freelist, allocating more if necessary.
 * The returned vnode is VX locked & refd.
 *
 * All new vnodes set the VAGE flags.  An open() of the vnode will
 * decrement the (2-bit) flags.  Vnodes which are opened several times
 * are thus retained in the cache over vnodes which are merely stat()d.
 *
 * MPSAFE
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;

	/*
	 * Try to reuse vnodes if we hit the max.  This situation only
	 * occurs in certain large-memory (2G+) situations.  We cannot
	 * attempt to directly reclaim vnodes due to nasty recursion
	 * problems.
	 */
	while (numvnodes - freevnodes > desiredvnodes)
		vnlru_proc_wait();

	/*
	 * Try to build up as many vnodes as we can before reallocating
	 * from the free list.  A vnode on the free list simply means
	 * that it is inactive with no resident pages.  It may or may not
	 * have been reclaimed and could have valuable information associated
	 * with it that we shouldn't throw away unless we really need to.
	 *
	 * HAMMER NOTE: Re-establishing a vnode is a fairly expensive
	 * operation for HAMMER but this should benefit UFS as well.
	 */
	if (freevnodes >= wantfreevnodes && numvnodes >= desiredvnodes)
		vp = allocfreevnode();
	else
		vp = NULL;
	if (vp == NULL) {
		vp = sysref_alloc(&vnode_sysref_class);
		KKASSERT((vp->v_flag & (VCACHED|VFREE)) == 0);
		lockmgr(&vp->v_lock, LK_EXCLUSIVE);
		numvnodes++;
	}

	/*
	 * We are using a managed sysref class, vnode fields are only
	 * zerod on initial allocation from the backing store, not
	 * on reallocation.  Thus we have to clear these fields for both
	 * reallocation and reuse.
	 */
#ifdef INVARIANTS
	if (vp->v_data)
		panic("cleaned vnode isn't");
	if (bio_track_active(&vp->v_track_read) ||
	    bio_track_active(&vp->v_track_write)) {
		panic("Clean vnode has pending I/O's");
	}
	if (vp->v_flag & VONWORKLST)
		panic("Clean vnode still pending on syncer worklist!");
	if (!RB_EMPTY(&vp->v_rbdirty_tree))
		panic("Clean vnode still has dirty buffers!");
	if (!RB_EMPTY(&vp->v_rbclean_tree))
		panic("Clean vnode still has clean buffers!");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("Clean vnode still on hash tree!");
	KKASSERT(vp->v_mount == NULL);
#endif
	vp->v_flag = VAGE0 | VAGE1;
	vp->v_lastw = 0;
	vp->v_lasta = 0;
	vp->v_cstart = 0;
	vp->v_clen = 0;
	vp->v_socket = 0;
	vp->v_opencount = 0;
	vp->v_writecount = 0;	/* XXX */

	/*
	 * lktimeout only applies when LK_TIMELOCK is used, and only
	 * the pageout daemon uses it.  The timeout may not be zero
	 * or the pageout daemon can deadlock in low-VM situations.
	 */
	lockreinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
	/* exclusive lock still held */

	/*
	 * Note: sysref needs to be activated to convert -0x40000000 to +1.
	 * The -0x40000000 comes from the last ref on reuse, and from
	 * sysref_init() on allocate.
	 */
	sysref_activate(&vp->v_sysref);
	vp->v_filesize = NOOFFSET;
	vp->v_type = VNON;
	vp->v_tag = 0;
	vp->v_ops = NULL;
	vp->v_data = NULL;
	KKASSERT(vp->v_mount == NULL);

	return (vp);
}
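
/*
 * Illustrative usage sketch: filesystem code typically reaches this
 * interface indirectly, e.g. via getnewvnode(), which layers the
 * vnode/mount association on top of allocvnode():
 *
 *	vp = allocvnode(0, 0);	// returns a VX locked, referenced vnode
 *	...initialize v_type, v_data, v_ops, associate with the mount...
 *	vx_unlock(vp);		// or convert to a normal vn_lock
 */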
/*
 * MPSAFE
 */
int
freesomevnodes(int n)
{
	struct vnode *vp;
	int count = 0;

	while (n) {
		--n;
		if ((vp = allocfreevnode()) == NULL)
			break;
		vx_put(vp);
		--numvnodes;
		++count;
	}
	return(count);
}