kernel - Fix not-quite-nonblocking VX lock in allocfreevnode()
[dragonfly.git] / sys / kern / vfs_lock.c
blob bcc36e24e3e607e176b0fe93f9d6d111cddb22c5
/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_lock.c,v 1.30 2008/06/30 03:57:41 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

static void vnode_terminate(struct vnode *vp);
static boolean_t vnode_ctor(void *obj, void *private, int ocflags);
static void vnode_dtor(void *obj, void *private);

static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");
static struct sysref_class vnode_sysref_class = {
	.name =		"vnode",
	.mtype =	M_VNODE,
	.proto =	SYSREF_PROTO_VNODE,
	.offset =	offsetof(struct vnode, v_sysref),
	.objsize =	sizeof(struct vnode),
	.mag_capacity =	256,
	.flags =	SRC_MANAGEDINIT,
	.ctor =		vnode_ctor,
	.dtor =		vnode_dtor,
	.ops = {
		.terminate = (sysref_terminate_func_t)vnode_terminate
	}
};

/*
 * The vnode free list holds inactive vnodes.  Aged inactive vnodes
 * are inserted prior to the mid point, and otherwise inserted
 * at the tail.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;
static struct vnode	vnode_free_mid;
static struct spinlock	vfs_spin = SPINLOCK_INITIALIZER(vfs_spin);

int  freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD,
	&freevnodes, 0, "");
static int wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
	&wantfreevnodes, 0, "");
#ifdef TRACKVNODE
static ulong trackvnode;
SYSCTL_ULONG(_debug, OID_AUTO, trackvnode, CTLFLAG_RW,
	&trackvnode, 0, "");
#endif

/*
 * Called from vfsinit()
 */
void
vfs_lock_init(void)
{
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INSERT_HEAD(&vnode_free_list, &vnode_free_mid, v_freelist);
	spin_init(&vfs_spin);
}

/*
 * Misc functions
 */
static __inline
void
_vsetflags(struct vnode *vp, int flags)
{
	atomic_set_int(&vp->v_flag, flags);
}

static __inline
void
_vclrflags(struct vnode *vp, int flags)
{
	atomic_clear_int(&vp->v_flag, flags);
}

void
vsetflags(struct vnode *vp, int flags)
{
	_vsetflags(vp, flags);
}

void
vclrflags(struct vnode *vp, int flags)
{
	_vclrflags(vp, flags);
}

/*
 * Inline helper functions.
 *
 * WARNING: vbusy() may only be called while the vnode lock or VX lock
 *	    is held.  The vnode spinlock need not be held.
 *
 * MPSAFE
 */
static __inline
void
__vbusy_interlocked(struct vnode *vp)
{
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
	_vclrflags(vp, VFREE);
}

static __inline
void
__vbusy(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode)
		kprintf("__vbusy %p %08x\n", vp, vp->v_flag);
#endif
	spin_lock_wr(&vfs_spin);
	__vbusy_interlocked(vp);
	spin_unlock_wr(&vfs_spin);
}

/*
 * Put a vnode on the free list.  The caller has cleared VCACHED or owns the
 * implied sysref related to having removed the vnode from the freelist
 * (and VCACHED is already clear in that case).
 *
 * MPSAFE
 */
static __inline
void
__vfree(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode) {
		kprintf("__vfree %p %08x\n", vp, vp->v_flag);
		print_backtrace();
	}
#endif
	spin_lock_wr(&vfs_spin);
	if (vp->v_flag & VRECLAIMED)
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	else if (vp->v_flag & (VAGE0 | VAGE1))
		TAILQ_INSERT_BEFORE(&vnode_free_mid, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	_vsetflags(vp, VFREE);
	spin_unlock_wr(&vfs_spin);
}

/*
 * Put a vnode on the free list.  The caller has cleared VCACHED or owns the
 * implied sysref related to having removed the vnode from the freelist
 * (and VCACHED is already clear in that case).
 *
 * MPSAFE
 */
static __inline
void
__vfreetail(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode)
		kprintf("__vfreetail %p %08x\n", vp, vp->v_flag);
#endif
	spin_lock_wr(&vfs_spin);
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	_vsetflags(vp, VFREE);
	spin_unlock_wr(&vfs_spin);
}

/*
 * Return a C boolean if we should put the vnode on the freelist (VFREE),
 * or leave it / mark it as VCACHED.
 *
 * This routine is only valid if the vnode is already either VFREE or
 * VCACHED, or if it can become VFREE or VCACHED via vnode_terminate().
 *
 * WARNING!  This function is typically called with v_spinlock held.
 *
 * MPSAFE
 */
static __inline boolean_t
vshouldfree(struct vnode *vp)
{
	return (vp->v_auxrefs == 0 &&
	    (vp->v_object == NULL || vp->v_object->resident_page_count == 0));
}

/*
 * Add a ref to an active vnode.  This function should never be called
 * with an inactive vnode (use vget() instead).
 *
 * MPSAFE
 */
void
vref(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt > 0 &&
		 (vp->v_flag & (VFREE|VINACTIVE)) == 0);
	sysref_get(&vp->v_sysref);
}

/*
 * Release a ref on an active or inactive vnode.  The sysref termination
 * function will be called when the last active reference is released,
 * and the vnode is returned to the objcache when the last inactive
 * reference is released.
 */
void
vrele(struct vnode *vp)
{
	sysref_put(&vp->v_sysref);
}
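
/*
 * Illustrative sketch (hypothetical helper, not part of this file): a
 * typical vref()/vrele() pairing on a vnode that is assumed to already be
 * active, e.g. one obtained earlier via vget().  Guarded out so it never
 * affects the build.
 */
#if 0
static void
example_vref_usage(struct vnode *vp)
{
	vref(vp);		/* add a ref; vp must already be active */
	/* ... hand vp to other code or use it ... */
	vrele(vp);		/* drop the ref; vnode_terminate() runs if
				 * this was the last active reference */
}
#endif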

/*
 * Add an auxiliary data structure reference to the vnode.  Auxiliary
 * references do not change the state of the vnode or prevent it
 * from being deactivated, reclaimed, or placed on or removed from
 * the free list.
 *
 * An auxiliary reference DOES prevent the vnode from being destroyed,
 * allowing you to vx_lock() it, test state, etc.
 *
 * An auxiliary reference DOES NOT move a vnode out of the VFREE state
 * once it has entered it.
 *
 * WARNING!  vhold() and vhold_interlocked() must not acquire v_spinlock.
 *	     The spinlock may or may not already be held by the caller.
 *	     vdrop() will clean up the free list state.
 *
 * MPSAFE
 */
void
vhold(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0);
	atomic_add_int(&vp->v_auxrefs, 1);
}

void
vhold_interlocked(struct vnode *vp)
{
	atomic_add_int(&vp->v_auxrefs, 1);
}

/*
 * Remove an auxiliary reference from the vnode.
 *
 * vdrop needs to check for a VCACHED->VFREE transition to catch cases
 * where a vnode is held past its reclamation.  We use v_spinlock to
 * interlock VCACHED -> !VCACHED transitions.
 *
 * MPSAFE
 */
void
vdrop(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0 && vp->v_auxrefs > 0);
	spin_lock_wr(&vp->v_spinlock);
	atomic_subtract_int(&vp->v_auxrefs, 1);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		_vclrflags(vp, VCACHED);
		__vfree(vp);
	}
	spin_unlock_wr(&vp->v_spinlock);
}
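
/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * pinning a vnode structure with an auxiliary reference so it cannot be
 * destroyed, then VX locking it to examine state.  The caller is assumed
 * to already hold some reference, per the vhold() assertion above.
 */
#if 0
static int
example_vhold_usage(struct vnode *vp)
{
	int reclaimed;

	vhold(vp);		/* aux ref: prevents destruction only */
	vx_lock(vp);
	reclaimed = (vp->v_flag & VRECLAIMED) != 0;
	vx_unlock(vp);
	vdrop(vp);		/* fixes up VCACHED->VFREE if needed */
	return (reclaimed);
}
#endif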

/*
 * This function is called when the last active reference on the vnode
 * is released, typically via vrele().  SYSREF will give the vnode a
 * negative ref count, indicating that it is undergoing termination or
 * is being set aside for the cache, and one final sysref_put() is
 * required to actually return it to the memory subsystem.
 *
 * However, because vnodes may have auxiliary structural references via
 * v_auxrefs, we must interlock auxiliary references against termination
 * via the VX lock mechanism.  It is possible for a vnode to be reactivated
 * while we were blocked on the lock.
 *
 * MPSAFE
 */
void
vnode_terminate(struct vnode *vp)
{
	vx_lock(vp);
	if (sysref_isinactive(&vp->v_sysref)) {
		/*
		 * Deactivate the vnode by marking it VFREE or VCACHED.
		 * The vnode can be reactivated from either state until
		 * reclaimed.  These states inherit the 'last' sysref on the
		 * vnode.
		 *
		 * NOTE: There may be additional inactive references from
		 *	 other entities blocking on the VX lock while we
		 *	 hold it, but this does not prevent us from changing
		 *	 the vnode's state.
		 *
		 * NOTE: The vnode could already be marked inactive.  XXX
		 *	 how?
		 *
		 * NOTE: v_mount may be NULL due to assignment to
		 *	 dead_vnode_vops
		 *
		 * NOTE: The vnode may be marked inactive with dirty buffers
		 *	 or dirty pages in its cached VM object still present.
		 *
		 * NOTE: VCACHED should not be set on entry.  We lose control
		 *	 of the sysref the instant the vnode is placed on the
		 *	 free list or when VCACHED is set.
		 *
		 *	 The VX lock is sufficient when transitioning
		 *	 to +VCACHED but not sufficient for the vshouldfree()
		 *	 interlocked test.
		 */
		if ((vp->v_flag & VINACTIVE) == 0) {
			_vsetflags(vp, VINACTIVE);
			if (vp->v_mount)
				VOP_INACTIVE(vp);
		}
		spin_lock_wr(&vp->v_spinlock);
		KKASSERT((vp->v_flag & (VFREE|VCACHED)) == 0);
		if (vshouldfree(vp))
			__vfree(vp);
		else
			_vsetflags(vp, VCACHED); /* inactive but not yet free */
		spin_unlock_wr(&vp->v_spinlock);
		vx_unlock(vp);
	} else {
		/*
		 * Someone reactivated the vnode while we were blocked on
		 * the VX lock.  Release the VX lock and release the (now
		 * active) last reference which is no longer last.
		 */
		vx_unlock(vp);
		vrele(vp);
	}
}

/*
 * Physical vnode constructor / destructor.  These are only executed on
 * the backend of the objcache.  They are NOT executed on every vnode
 * allocation or deallocation.
 *
 * MPSAFE
 */
boolean_t
vnode_ctor(void *obj, void *private, int ocflags)
{
	struct vnode *vp = obj;

	lwkt_token_init(&vp->v_token);
	lockinit(&vp->v_lock, "vnode", 0, 0);
	ccms_dataspace_init(&vp->v_ccms);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	return(TRUE);
}

/*
 * MPSAFE
 */
void
vnode_dtor(void *obj, void *private)
{
	struct vnode *vp = obj;

	ccms_dataspace_destroy(&vp->v_ccms);
}

/****************************************************************
 *			VX LOCKING FUNCTIONS			*
 ****************************************************************
 *
 * These functions lock vnodes for reclamation and deactivation related
 * activities.  The caller must already be holding some sort of reference
 * on the vnode.
 *
 * MPSAFE
 */
void
vx_lock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

static int
vx_lock_nonblock(struct vnode *vp)
{
	return(lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT | LK_NOSPINWAIT));
}
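
/*
 * NOTE: vx_lock_nonblock() adds LK_NOSPINWAIT on top of LK_NOWAIT so the
 *	 attempt should not block even on the lockmgr's internal spinlock.
 *	 allocfreevnode() relies on this being truly non-blocking while it
 *	 holds vfs_spin (see the "bad hack" comment there), avoiding the
 *	 lock order reversal against v_spinlock.
 */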

void
vx_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}

/****************************************************************
 *			VNODE ACQUISITION FUNCTIONS		*
 ****************************************************************
 *
 * These functions must be used when accessing a vnode via an auxiliary
 * reference such as the namecache or free list, or when you wish to
 * do a combo ref+lock sequence.
 *
 * These functions are MANDATORY for any code chain accessing a vnode
 * whose activation state is not known.
 *
 * vget() can be called with LK_NOWAIT and will return EBUSY if the
 * lock cannot be immediately acquired.
 *
 * vget()/vput() are used when reactivation is desired.
 *
 * vx_get() and vx_put() are used when reactivation is not desired.
 */
int
vget(struct vnode *vp, int flags)
{
	int error;

	/*
	 * A lock type must be passed
	 */
	if ((flags & LK_TYPE_MASK) == 0) {
		panic("vget() called with no lock specified!");
		/* NOT REACHED */
	}

	/*
	 * Reference the structure and then acquire the lock.  0->1
	 * transitions and refs during termination are allowed here so
	 * call sysref directly.
	 *
	 * NOTE: The requested lock might be a shared lock and does
	 *	 not protect our access to the refcnt or other fields.
	 */
	sysref_get(&vp->v_sysref);
	if ((error = vn_lock(vp, flags)) != 0) {
		/*
		 * The lock failed, undo and return an error.
		 */
		sysref_put(&vp->v_sysref);
	} else if (vp->v_flag & VRECLAIMED) {
		/*
		 * The node is being reclaimed and cannot be reactivated
		 * any more, undo and return ENOENT.
		 */
		vn_unlock(vp);
		vrele(vp);
		error = ENOENT;
	} else {
		/*
		 * If the vnode is marked VFREE or VCACHED it needs to be
		 * reactivated, otherwise it had better already be active.
		 * VINACTIVE must also be cleared.
		 *
		 * In the VFREE/VCACHED case we have to throw away the
		 * sysref that was earmarking those cases and preventing
		 * the vnode from being destroyed.  Our sysref is still held.
		 *
		 * The spinlock is our only real protection here.
		 */
		spin_lock_wr(&vp->v_spinlock);
		if (vp->v_flag & VFREE) {
			__vbusy(vp);
			sysref_activate(&vp->v_sysref);
			spin_unlock_wr(&vp->v_spinlock);
			sysref_put(&vp->v_sysref);
		} else if (vp->v_flag & VCACHED) {
			_vclrflags(vp, VCACHED);
			sysref_activate(&vp->v_sysref);
			spin_unlock_wr(&vp->v_spinlock);
			sysref_put(&vp->v_sysref);
		} else {
			if (sysref_isinactive(&vp->v_sysref)) {
				sysref_activate(&vp->v_sysref);
				kprintf("Warning vp %p reactivation race\n",
					vp);
			}
			spin_unlock_wr(&vp->v_spinlock);
		}
		_vclrflags(vp, VINACTIVE);
		error = 0;
	}
	return(error);
}
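
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * normal ref+lock / unlock+unref sequence using vget() and vput(), for a
 * vnode reached via an auxiliary reference such as a namecache entry.
 */
#if 0
static int
example_vget_usage(struct vnode *vp)
{
	int error;

	error = vget(vp, LK_EXCLUSIVE);	/* a lock type is mandatory */
	if (error == 0) {
		/* ... vnode is active and exclusively locked ... */
		vput(vp);		/* unlock and release the ref */
	}
	return (error);
}
#endif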

/*
 * MPSAFE
 */
void
vput(struct vnode *vp)
{
	vn_unlock(vp);
	vrele(vp);
}

/*
 * XXX The vx_*() locks should use auxrefs, not the main reference counter.
 *
 * MPSAFE
 */
void
vx_get(struct vnode *vp)
{
	sysref_get(&vp->v_sysref);
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

/*
 * MPSAFE
 */
int
vx_get_nonblock(struct vnode *vp)
{
	int error;

	sysref_get(&vp->v_sysref);
	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
	if (error)
		sysref_put(&vp->v_sysref);
	return(error);
}

/*
 * Release a VX lock that also held a ref on the vnode.
 *
 * vx_put needs to check for a VCACHED->VFREE transition to catch the
 * case where e.g. vnlru issues a vgone*().
 *
 * MPSAFE
 */
void
vx_put(struct vnode *vp)
{
	spin_lock_wr(&vp->v_spinlock);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		_vclrflags(vp, VCACHED);
		__vfree(vp);
	}
	spin_unlock_wr(&vp->v_spinlock);
	lockmgr(&vp->v_lock, LK_RELEASE);
	sysref_put(&vp->v_sysref);
}
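
/*
 * Illustrative sketch (hypothetical helper, not part of this file):
 * vx_get()/vx_put() bracket work that must not reactivate the vnode,
 * such as inspection or teardown of an inactive vnode.
 */
#if 0
static void
example_vx_get_usage(struct vnode *vp)
{
	vx_get(vp);		/* ref + VX lock, no reactivation */
	/* ... vnode cannot be reactivated or reused while VX locked ... */
	vx_put(vp);		/* handles VCACHED->VFREE, then unlocks
				 * and drops the ref */
}
#endif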

/*
 * Try to reuse a vnode from the free list.
 *
 * NOTE: The returned vnode is not completely initialized.
 *
 * WARNING: The freevnodes count can race, NULL can be returned even if
 *	    freevnodes != 0.
 *
 * MPSAFE
 */
static
struct vnode *
allocfreevnode(void)
{
	struct vnode *vp;
	int count;

	for (count = 0; count < freevnodes; count++) {
		/*
		 * Try to lock the first vnode on the free list.
		 * Cycle if we can't.
		 *
		 * We use a bad hack in vx_lock_nonblock() which avoids
		 * the lock order reversal between vfs_spin and v_spinlock.
		 * This is very fragile code and I don't want to use
		 * vhold here.
		 */
		spin_lock_wr(&vfs_spin);
		vp = TAILQ_FIRST(&vnode_free_list);
		if (vp == &vnode_free_mid)
			vp = TAILQ_NEXT(vp, v_freelist);
		if (vx_lock_nonblock(vp)) {
			KKASSERT(vp->v_flag & VFREE);
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_TAIL(&vnode_free_list,
					  vp, v_freelist);
			spin_unlock_wr(&vfs_spin);
			continue;
		}

		/*
		 * We inherit the sysref associated with the vnode on the
		 * free list.  Because VCACHED is clear the vnode will not
		 * be placed back on the free list.  We own the sysref
		 * free and clear and thus control the disposition of
		 * the vnode.
		 */
		__vbusy_interlocked(vp);
		spin_unlock_wr(&vfs_spin);
#ifdef TRACKVNODE
		if ((ulong)vp == trackvnode)
			kprintf("allocfreevnode %p %08x\n", vp, vp->v_flag);
#endif
		/*
		 * Do not reclaim/reuse a vnode while auxiliary refs exist.
		 * This includes namecache refs due to a related ncp being
		 * locked or having children.
		 *
		 * We will make this test several times as auxrefs can
		 * get incremented on us without any spinlocks being held
		 * until we have removed all namecache and inode references
		 * to the vnode.
		 *
		 * Because VCACHED is already in the correct state (cleared)
		 * we cannot race other vdrop()s occurring at the same time
		 * and can safely place vp on the free list.
		 *
		 * The free list association reinherits the sysref.
		 */
		if (vp->v_auxrefs) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}

		/*
		 * We inherit the reference that was previously associated
		 * with the vnode being on the free list.  VCACHED had better
		 * not be set because the reference and VX lock prevents
		 * the sysref from transitioning to an active state.
		 */
		KKASSERT((vp->v_flag & (VINACTIVE|VCACHED)) == VINACTIVE);
		KKASSERT(sysref_isinactive(&vp->v_sysref));

		/*
		 * Holding the VX lock on an inactive vnode prevents it
		 * from being reactivated or reused.  New namecache
		 * associations can only be made using active vnodes.
		 *
		 * Another thread may be blocked on our vnode lock while
		 * holding a namecache lock.  We can only reuse this vnode
		 * if we can clear all namecache associations without
		 * blocking.
		 *
		 * Because VCACHED is already in the correct state (cleared)
		 * we cannot race other vdrop()s occurring at the same time
		 * and can safely place vp on the free list.
		 */
		if ((vp->v_flag & VRECLAIMED) == 0) {
			if (cache_inval_vp_nonblock(vp)) {
				__vfreetail(vp);
				vx_unlock(vp);
				continue;
			}
			vgone_vxlocked(vp);
			/* vnode is still VX locked */
		}

		/*
		 * We can reuse the vnode if no primary or auxiliary
		 * references remain other than ours, else put it
		 * back on the free list and keep looking.
		 *
		 * Either the free list inherits the last reference
		 * or we fall through and sysref_activate() the last
		 * reference.
		 *
		 * Since the vnode is in a VRECLAIMED state, no new
		 * namecache associations could have been made.
		 */
		KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
		if (vp->v_auxrefs ||
		    !sysref_islastdeactivation(&vp->v_sysref)) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}

		/*
		 * Return a VX locked vnode suitable for reuse.  The caller
		 * inherits the sysref.
		 */
		return(vp);
	}
	return(NULL);
}

/*
 * Obtain a new vnode from the freelist, allocating more if necessary.
 * The returned vnode is VX locked & refd.
 *
 * All new vnodes set the VAGE flags.  An open() of the vnode will
 * decrement the (2-bit) flags.  Vnodes which are opened several times
 * are thus retained in the cache over vnodes which are merely stat()d.
 *
 * MPSAFE
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;

	/*
	 * Try to reuse vnodes if we hit the max.  This situation only
	 * occurs in certain large-memory (2G+) situations.  We cannot
	 * attempt to directly reclaim vnodes due to nasty recursion
	 * problems.
	 */
	while (numvnodes - freevnodes > desiredvnodes)
		vnlru_proc_wait();

	/*
	 * Try to build up as many vnodes as we can before reallocating
	 * from the free list.  A vnode on the free list simply means
	 * that it is inactive with no resident pages.  It may or may not
	 * have been reclaimed and could have valuable information associated
	 * with it that we shouldn't throw away unless we really need to.
	 *
	 * HAMMER NOTE: Re-establishing a vnode is a fairly expensive
	 * operation for HAMMER but this should benefit UFS as well.
	 */
	if (freevnodes >= wantfreevnodes && numvnodes >= desiredvnodes)
		vp = allocfreevnode();
	else
		vp = NULL;
	if (vp == NULL) {
		vp = sysref_alloc(&vnode_sysref_class);
		lockmgr(&vp->v_lock, LK_EXCLUSIVE);
		numvnodes++;
	}

	/*
	 * We are using a managed sysref class, so vnode fields are only
	 * zeroed on initial allocation from the backing store, not
	 * on reallocation.  Thus we have to clear these fields for both
	 * reallocation and reuse.
	 */
#ifdef INVARIANTS
	if (vp->v_data)
		panic("cleaned vnode isn't");
	if (bio_track_active(&vp->v_track_read) ||
	    bio_track_active(&vp->v_track_write)) {
		panic("Clean vnode has pending I/O's");
	}
	if (vp->v_flag & VONWORKLST)
		panic("Clean vnode still pending on syncer worklist!");
	if (!RB_EMPTY(&vp->v_rbdirty_tree))
		panic("Clean vnode still has dirty buffers!");
	if (!RB_EMPTY(&vp->v_rbclean_tree))
		panic("Clean vnode still has clean buffers!");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("Clean vnode still on hash tree!");
	KKASSERT(vp->v_mount == NULL);
#endif
	vp->v_flag = VAGE0 | VAGE1;
	vp->v_lastw = 0;
	vp->v_lasta = 0;
	vp->v_cstart = 0;
	vp->v_clen = 0;
	vp->v_socket = 0;
	vp->v_opencount = 0;
	vp->v_writecount = 0;	/* XXX */

	/*
	 * lktimeout only applies when LK_TIMELOCK is used, and only
	 * the pageout daemon uses it.  The timeout may not be zero
	 * or the pageout daemon can deadlock in low-VM situations.
	 */
	if (lktimeout == 0)
		lktimeout = hz / 10;
	lockreinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
	/* exclusive lock still held */

	/*
	 * Note: sysref needs to be activated to convert -0x40000000 to +1.
	 * The -0x40000000 comes from the last ref on reuse, and from
	 * sysref_init() on allocate.
	 */
	sysref_activate(&vp->v_sysref);
	vp->v_filesize = NOOFFSET;
	vp->v_type = VNON;
	vp->v_tag = 0;
	vp->v_ops = NULL;
	vp->v_data = NULL;
	KKASSERT(vp->v_mount == NULL);

	return (vp);
}
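
/*
 * Illustrative sketch (hypothetical helper, not part of this file): a
 * caller of allocvnode() receives a VX locked, referenced vnode and is
 * expected to associate it with a filesystem (v_type, v_ops, v_data,
 * v_tag, etc.) before exposing it; the exact association steps vary by
 * filesystem and are assumptions here.
 */
#if 0
static struct vnode *
example_allocvnode_usage(void)
{
	struct vnode *vp;

	vp = allocvnode(0, 0);		/* returned VX locked & refd */
	vp->v_type = VREG;
	/* ... set v_ops, v_data, v_tag and attach to a mount ... */
	return (vp);			/* still VX locked for the caller */
}
#endif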

/*
 * MPSAFE
 */
int
freesomevnodes(int n)
{
	struct vnode *vp;
	int count = 0;

	while (n) {
		--n;
		if ((vp = allocfreevnode()) == NULL)
			break;
		vx_put(vp);
		--numvnodes;
	}
	return(count);
}