kernel - fix improper VOP_*() calls on dead vnode
sys/kern/vfs_lock.c
/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_lock.c,v 1.30 2008/06/30 03:57:41 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>
static void vnode_terminate(struct vnode *vp);
static boolean_t vnode_ctor(void *obj, void *private, int ocflags);
static void vnode_dtor(void *obj, void *private);
static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");
static struct sysref_class vnode_sysref_class = {
	.name =		"vnode",
	.mtype =	M_VNODE,
	.proto =	SYSREF_PROTO_VNODE,
	.offset =	offsetof(struct vnode, v_sysref),
	.objsize =	sizeof(struct vnode),
	.mag_capacity =	256,
	.flags =	SRC_MANAGEDINIT,
	.ctor =		vnode_ctor,
	.dtor =		vnode_dtor,
	.ops = {
		.terminate = (sysref_terminate_func_t)vnode_terminate
	}
};
/*
 * The vnode free list holds inactive vnodes.  Aged inactive vnodes
 * are inserted prior to the mid point, and otherwise inserted
 * at the tail.
 */
static TAILQ_HEAD(freelst, vnode) vnode_free_list;
static struct vnode	vnode_free_mid;
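
/*
 * Illustrative sketch (editorial, not in the original source): __vfree()
 * below keeps the list loosely ordered around the vnode_free_mid marker,
 * so reclaimed and aged vnodes are reused before fresh inactive ones:
 *
 *	HEAD -> [VRECLAIMED] .. [VAGE0|VAGE1] -> &vnode_free_mid -> [fresh] -> TAIL
 */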
int  freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD,
	&freevnodes, 0, "");
static int wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
	&wantfreevnodes, 0, "");
#ifdef TRACKVNODE
static ulong trackvnode;
SYSCTL_ULONG(_debug, OID_AUTO, trackvnode, CTLFLAG_RW,
	&trackvnode, 0, "");
#endif
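
/*
 * Usage sketch (editorial): with TRACKVNODE compiled in, a specific vnode
 * can be traced by writing its kernel address to the sysctl from userland,
 * e.g. (address and value syntax illustrative):
 *
 *	sysctl debug.trackvnode=0xfffff80012345678
 *
 * after which the __vbusy()/__vfree() paths below kprintf() each transition.
 */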
/*
 * Called from vfsinit()
 */
void
vfs_lock_init(void)
{
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INSERT_HEAD(&vnode_free_list, &vnode_free_mid, v_freelist);
}
/*
 * Inline helper functions.  vbusy() and vfree() must be called while in a
 * critical section.
 *
 * Warning: must be callable if the caller holds a read spinlock to something
 * else, meaning we can't use read spinlocks here.
 */
static __inline
void
__vbusy(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode)
		kprintf("__vbusy %p %08x\n", vp, vp->v_flag);
#endif
	TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
	freevnodes--;
	vp->v_flag &= ~VFREE;
}
static __inline
void
__vfree(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode) {
		kprintf("__vfree %p %08x\n", vp, vp->v_flag);
		print_backtrace();
	}
#endif
	if (vp->v_flag & VRECLAIMED)
		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	else if (vp->v_flag & (VAGE0 | VAGE1))
		TAILQ_INSERT_BEFORE(&vnode_free_mid, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	vp->v_flag |= VFREE;
}
static __inline
void
__vfreetail(struct vnode *vp)
{
#ifdef TRACKVNODE
	if ((ulong)vp == trackvnode)
		kprintf("__vfreetail %p %08x\n", vp, vp->v_flag);
#endif
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	freevnodes++;
	vp->v_flag |= VFREE;
}
/*
 * Return a C boolean if we should put the vnode on the freelist (VFREE),
 * or leave it / mark it as VCACHED.
 *
 * This routine is only valid if the vnode is already either VFREE or
 * VCACHED, or if it can become VFREE or VCACHED via vnode_terminate().
 */
static __inline boolean_t
vshouldfree(struct vnode *vp)
{
	return (vp->v_auxrefs == 0 &&
	    (vp->v_object == NULL || vp->v_object->resident_page_count == 0));
}
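
/*
 * Example of the decision (editorial sketch): a vnode with no auxiliary
 * references and no resident pages may go straight to VFREE; one still
 * backing cached pages is kept VCACHED so the page cache survives:
 *
 *	v_auxrefs == 0 && v_object == NULL		-> may be freed
 *	v_auxrefs == 0 && resident_page_count == 0	-> may be freed
 *	anything else					-> keep VCACHED
 */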
/*
 * Add a ref to an active vnode.  This function should never be called
 * with an inactive vnode (use vget() instead).
 */
void
vref(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt > 0 &&
		 (vp->v_flag & (VFREE|VINACTIVE)) == 0);
	sysref_get(&vp->v_sysref);
}
/*
 * Release a ref on an active or inactive vnode.  The sysref termination
 * function will be called when the last active reference is released,
 * and the vnode is returned to the objcache when the last inactive
 * reference is released.
 */
void
vrele(struct vnode *vp)
{
	sysref_put(&vp->v_sysref);
}
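
/*
 * Typical ref lifecycle (editorial sketch): a caller that already has an
 * active vnode takes an extra ref around a blocking operation:
 *
 *	vref(vp);		// vnode known-active, add a ref
 *	...use vp...
 *	vrele(vp);		// may trigger vnode_terminate() if last
 */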
/*
 * Add an auxiliary data structure reference to the vnode.  Auxiliary
 * references do not change the state of the vnode or prevent it
 * from being deactivated, reclaimed, or placed on the free list.
 *
 * An auxiliary reference DOES prevent the vnode from being destroyed,
 * allowing you to vx_lock() it, test state, etc.
 *
 * An auxiliary reference DOES NOT move a vnode out of the VFREE state
 * once it has entered it.
 *
 * MPSAFE
 */
void
vhold(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0);
	atomic_add_int(&vp->v_auxrefs, 1);
}
/*
 * Remove an auxiliary reference from the vnode.
 *
 * vdrop needs to check for a VCACHED->VFREE transition to catch cases
 * where a vnode is held past its reclamation.
 */
void
vdrop(struct vnode *vp)
{
	KKASSERT(vp->v_sysref.refcnt != 0 && vp->v_auxrefs > 0);
	atomic_subtract_int(&vp->v_auxrefs, 1);
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		vp->v_flag &= ~VCACHED;
		__vfree(vp);
	}
}
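
/*
 * Auxiliary reference pattern (editorial sketch): a data structure keeps
 * the vnode from being destroyed, without keeping it active, then tests
 * its state under the VX lock:
 *
 *	vhold(vp);		// vnode cannot be destroyed now
 *	vx_lock(vp);
 *	...inspect vp->v_flag, etc...
 *	vx_unlock(vp);
 *	vdrop(vp);		// may complete a VCACHED->VFREE transition
 */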
/*
 * This function is called when the last active reference on the vnode
 * is released, typically via vrele().  SYSREF will give the vnode a
 * negative ref count, indicating that it is undergoing termination or
 * is being set aside for the cache, and one final sysref_put() is
 * required to actually return it to the memory subsystem.
 *
 * However, because vnodes may have auxiliary structural references via
 * v_auxrefs, we must interlock auxiliary references against termination
 * via the VX lock mechanism.  It is possible for a vnode to be reactivated
 * while we were blocked on the lock.
 */
void
vnode_terminate(struct vnode *vp)
{
	vx_lock(vp);
	if (sysref_isinactive(&vp->v_sysref)) {
		/*
		 * Deactivate the vnode by marking it VFREE or VCACHED.
		 * The vnode can be reactivated from either state until
		 * reclaimed.  These states inherit the 'last' sysref on the
		 * vnode.
		 *
		 * NOTE: There may be additional inactive references from
		 *	 other entities blocking on the VX lock while we
		 *	 hold it, but this does not prevent us from
		 *	 changing the vnode's state.
		 *
		 * NOTE: The vnode could already be marked inactive.  XXX
		 *	 how?
		 *
		 * NOTE: v_mount may be NULL due to assignment to
		 *	 dead_vnode_vops
		 *
		 * NOTE: The vnode may be marked inactive with dirty buffers
		 *	 or dirty pages in its cached VM object still present.
		 */
		if ((vp->v_flag & VINACTIVE) == 0) {
			vp->v_flag |= VINACTIVE;
			if (vp->v_mount)
				VOP_INACTIVE(vp);
		}
		KKASSERT((vp->v_flag & (VFREE|VCACHED)) == 0);
		if (vshouldfree(vp))
			__vfree(vp);
		else
			vp->v_flag |= VCACHED; /* inactive but not yet free */
		vx_unlock(vp);
	} else {
		/*
		 * Someone reactivated the vnode while we were blocked on
		 * the VX lock.  Release the VX lock and release the (now
		 * active) last reference which is no longer last.
		 */
		vx_unlock(vp);
		vrele(vp);
	}
}
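
/*
 * Summary of the deactivation states handled above (editorial sketch):
 *
 *	active --vrele()--> VINACTIVE --+--> VFREE   (no pages, on freelist)
 *	                                +--> VCACHED (pages resident, off list)
 *
 * Both VFREE and VCACHED vnodes can be pulled back to active by vget()
 * until the vnode is VRECLAIMED.
 */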
/*
 * Physical vnode constructor / destructor.  These are only executed on
 * the backend of the objcache.  They are NOT executed on every vnode
 * allocation or deallocation.
 */
boolean_t
vnode_ctor(void *obj, void *private, int ocflags)
{
	struct vnode *vp = obj;

	lwkt_token_init(&vp->v_token);
	lockinit(&vp->v_lock, "vnode", 0, 0);
	ccms_dataspace_init(&vp->v_ccms);
	TAILQ_INIT(&vp->v_namecache);
	RB_INIT(&vp->v_rbclean_tree);
	RB_INIT(&vp->v_rbdirty_tree);
	RB_INIT(&vp->v_rbhash_tree);
	return(TRUE);
}

void
vnode_dtor(void *obj, void *private)
{
	struct vnode *vp = obj;

	ccms_dataspace_destroy(&vp->v_ccms);
}
/****************************************************************
 *			VX LOCKING FUNCTIONS			*
 ****************************************************************
 *
 * These functions lock vnodes for reclamation and deactivation related
 * activities.  The caller must already be holding some sort of reference
 * on the vnode.
 */
void
vx_lock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

static int
vx_lock_nonblock(struct vnode *vp)
{
	return(lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT));
}

void
vx_unlock(struct vnode *vp)
{
	lockmgr(&vp->v_lock, LK_RELEASE);
}
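
/*
 * Illustrative sketch (editorial): reclamation code typically VX locks a
 * vnode it reached via an auxiliary reference, never via a plain vn_lock():
 *
 *	vhold(vp);
 *	vx_lock(vp);
 *	if ((vp->v_flag & VRECLAIMED) == 0)
 *		vgone_vxlocked(vp);	// e.g. a forced-unmount path
 *	vx_unlock(vp);
 *	vdrop(vp);
 */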
/****************************************************************
 *			VNODE ACQUISITION FUNCTIONS		*
 ****************************************************************
 *
 * These functions must be used when accessing a vnode via an auxiliary
 * reference such as the namecache or free list, or when you wish to
 * do a combo ref+lock sequence.
 *
 * These functions are MANDATORY for any code chain accessing a vnode
 * whose activation state is not known.
 *
 * vget()/vput() are used when reactivation is desired.
 *
 * vx_get() and vx_put() are used when reactivation is not desired.
 */
int
vget(struct vnode *vp, int flags)
{
	int error;

	/*
	 * A lock type must be passed
	 */
	if ((flags & LK_TYPE_MASK) == 0) {
		panic("vget() called with no lock specified!");
		/* NOT REACHED */
	}

	/*
	 * Reference the structure and then acquire the lock.  0->1
	 * transitions and refs during termination are allowed here so
	 * call sysref directly.
	 */
	sysref_get(&vp->v_sysref);
	if ((error = vn_lock(vp, flags)) != 0) {
		/*
		 * The lock failed, undo and return an error.
		 */
		sysref_put(&vp->v_sysref);
	} else if (vp->v_flag & VRECLAIMED) {
		/*
		 * The node is being reclaimed and cannot be reactivated
		 * any more, undo and return ENOENT.
		 */
		vn_unlock(vp);
		vrele(vp);
		error = ENOENT;
	} else {
		/*
		 * If the vnode is marked VFREE or VCACHED it needs to be
		 * reactivated, otherwise it had better already be active.
		 * VINACTIVE must also be cleared.
		 *
		 * In the VFREE/VCACHED case we have to throw away the
		 * sysref that was earmarking those cases and preventing
		 * the vnode from being destroyed.  Our sysref is still held.
		 */
		if (vp->v_flag & VFREE) {
			__vbusy(vp);
			sysref_put(&vp->v_sysref);
			sysref_activate(&vp->v_sysref);
		} else if (vp->v_flag & VCACHED) {
			vp->v_flag &= ~VCACHED;
			sysref_put(&vp->v_sysref);
			sysref_activate(&vp->v_sysref);
		} else {
			KKASSERT(sysref_isactive(&vp->v_sysref));
		}
		vp->v_flag &= ~VINACTIVE;
		error = 0;
	}
	return(error);
}
void
vput(struct vnode *vp)
{
	vn_unlock(vp);
	vrele(vp);
}
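
/*
 * Usage sketch (editorial): reactivating a vnode found via an auxiliary
 * reference such as the namecache; ENOENT means it was reclaimed out from
 * under us:
 *
 *	if (vget(vp, LK_EXCLUSIVE) == 0) {
 *		...vp is active, locked, VINACTIVE cleared...
 *		vput(vp);	// unlock + release in one call
 *	}
 */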
/*
 * XXX The vx_*() locks should use auxrefs, not the main reference counter.
 */
void
vx_get(struct vnode *vp)
{
	sysref_get(&vp->v_sysref);
	lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

int
vx_get_nonblock(struct vnode *vp)
{
	int error;

	sysref_get(&vp->v_sysref);
	error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
	if (error)
		sysref_put(&vp->v_sysref);
	return(error);
}
/*
 * Release a VX lock that also held a ref on the vnode.
 *
 * vx_put needs to check for a VCACHED->VFREE transition to catch the
 * case where e.g. vnlru issues a vgone*().
 */
void
vx_put(struct vnode *vp)
{
	if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
		vp->v_flag &= ~VCACHED;
		__vfree(vp);
	}
	lockmgr(&vp->v_lock, LK_RELEASE);
	sysref_put(&vp->v_sysref);
}
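
/*
 * Usage sketch (editorial): vx_get()/vx_put() bracket work on a vnode
 * whose activation state must not change, e.g. inspecting a possibly-dead
 * vnode without reactivating it:
 *
 *	vx_get(vp);
 *	...test vp->v_flag & VRECLAIMED, etc...
 *	vx_put(vp);	// may move a VCACHED vnode to the free list
 */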
/*
 * Misc functions
 */
void
vsetflags(struct vnode *vp, int flags)
{
	crit_enter();
	vp->v_flag |= flags;
	crit_exit();
}

void
vclrflags(struct vnode *vp, int flags)
{
	crit_enter();
	vp->v_flag &= ~flags;
	crit_exit();
}
/*
 * Try to reuse a vnode from the free list.  NOTE: The returned vnode
 * is not completely initialized.
 */
static
struct vnode *
allocfreevnode(void)
{
	struct vnode *vp;
	int count;

	for (count = 0; count < freevnodes; count++) {
		/*
		 * Note that regardless of how we block in this loop,
		 * we only get here if freevnodes != 0 so there
		 * had better be something on the list.
		 *
		 * Try to lock the first vnode on the free list.
		 * Cycle if we can't.
		 *
		 * XXX NOT MP SAFE
		 */
		vp = TAILQ_FIRST(&vnode_free_list);
		if (vp == &vnode_free_mid)
			vp = TAILQ_NEXT(vp, v_freelist);
		if (vx_lock_nonblock(vp)) {
			KKASSERT(vp->v_flag & VFREE);
			TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
			TAILQ_INSERT_TAIL(&vnode_free_list,
					  vp, v_freelist);
			continue;
		}
#ifdef TRACKVNODE
		if ((ulong)vp == trackvnode)
			kprintf("allocfreevnode %p %08x\n", vp, vp->v_flag);
#endif

		/*
		 * With the vnode locked we can safely remove it
		 * from the free list.  We inherit the reference
		 * that was previously associated with the vnode
		 * being on the free list.
		 */
		KKASSERT((vp->v_flag & (VFREE|VINACTIVE)) ==
			  (VFREE|VINACTIVE));
		KKASSERT(sysref_isinactive(&vp->v_sysref));
		__vbusy(vp);

		/*
		 * Holding the VX lock on an inactive vnode prevents it
		 * from being reactivated or reused.  New namecache
		 * associations can only be made using active vnodes.
		 *
		 * Another thread may be blocked on our vnode lock while
		 * holding a namecache lock.  We can only reuse this vnode
		 * if we can clear all namecache associations without
		 * blocking.
		 */
		if ((vp->v_flag & VRECLAIMED) == 0) {
			if (cache_inval_vp_nonblock(vp)) {
				__vfreetail(vp);
				vx_unlock(vp);
				continue;
			}
			vgone_vxlocked(vp);
			/* vnode is still VX locked */
		}

		/*
		 * We can reuse the vnode if no primary or auxiliary
		 * references remain other than ours, else put it
		 * back on the free list and keep looking.
		 *
		 * Either the free list inherits the last reference
		 * or we fall through and sysref_activate() the last
		 * reference.
		 *
		 * Since the vnode is in a VRECLAIMED state, no new
		 * namecache associations could have been made.
		 */
		KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
		if (vp->v_auxrefs ||
		    !sysref_islastdeactivation(&vp->v_sysref)) {
			__vfreetail(vp);
			vx_unlock(vp);
			continue;
		}

		/*
		 * Return a VX locked vnode suitable for reuse.  The caller
		 * inherits the sysref.
		 */
		return(vp);
	}
	return(NULL);
}
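
/*
 * Editorial note: a vnode is actually reused above only when all of the
 * following held while we owned the VX lock:
 *
 *	- it was VFREE|VINACTIVE on the free list and lockable non-blocking
 *	- all namecache entries could be invalidated without blocking
 *	- it is VRECLAIMED with no v_auxrefs and we hold the last sysref
 *
 * Anything else cycles the vnode to the list tail and the loop continues.
 */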
/*
 * Obtain a new vnode from the freelist, allocating more if necessary.
 * The returned vnode is VX locked & refd.
 *
 * All new vnodes set the VAGE flags.  An open() of the vnode will
 * decrement the (2-bit) flags.  Vnodes which are opened several times
 * are thus retained in the cache over vnodes which are merely stat()d.
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
	struct vnode *vp;

	/*
	 * Try to reuse vnodes if we hit the max.  This situation only
	 * occurs in certain large-memory (2G+) situations.  We cannot
	 * attempt to directly reclaim vnodes due to nasty recursion
	 * problems.
	 */
	while (numvnodes - freevnodes > desiredvnodes)
		vnlru_proc_wait();

	/*
	 * Try to build up as many vnodes as we can before reallocating
	 * from the free list.  A vnode on the free list simply means
	 * that it is inactive with no resident pages.  It may or may not
	 * have been reclaimed and could have valuable information associated
	 * with it that we shouldn't throw away unless we really need to.
	 *
	 * HAMMER NOTE: Re-establishing a vnode is a fairly expensive
	 * operation for HAMMER but this should benefit UFS as well.
	 */
	if (freevnodes >= wantfreevnodes && numvnodes >= desiredvnodes)
		vp = allocfreevnode();
	else
		vp = NULL;
	if (vp == NULL) {
		vp = sysref_alloc(&vnode_sysref_class);
		lockmgr(&vp->v_lock, LK_EXCLUSIVE);
		numvnodes++;
	}

	/*
	 * We are using a managed sysref class, vnode fields are only
	 * zeroed on initial allocation from the backing store, not
	 * on reallocation.  Thus we have to clear these fields for both
	 * reallocation and reuse.
	 */
#ifdef INVARIANTS
	if (vp->v_data)
		panic("cleaned vnode isn't");
	if (bio_track_active(&vp->v_track_read) ||
	    bio_track_active(&vp->v_track_write)) {
		panic("Clean vnode has pending I/O's");
	}
	if (vp->v_flag & VONWORKLST)
		panic("Clean vnode still pending on syncer worklist!");
	if (!RB_EMPTY(&vp->v_rbdirty_tree))
		panic("Clean vnode still has dirty buffers!");
	if (!RB_EMPTY(&vp->v_rbclean_tree))
		panic("Clean vnode still has clean buffers!");
	if (!RB_EMPTY(&vp->v_rbhash_tree))
		panic("Clean vnode still on hash tree!");
	KKASSERT(vp->v_mount == NULL);
#endif
	vp->v_flag = VAGE0 | VAGE1;
	vp->v_lastw = 0;
	vp->v_lasta = 0;
	vp->v_cstart = 0;
	vp->v_clen = 0;
	vp->v_socket = 0;
	vp->v_opencount = 0;
	vp->v_writecount = 0;	/* XXX */

	/*
	 * lktimeout only applies when LK_TIMELOCK is used, and only
	 * the pageout daemon uses it.  The timeout may not be zero
	 * or the pageout daemon can deadlock in low-VM situations.
	 */
	if (lktimeout == 0)
		lktimeout = hz / 10;
	lockreinit(&vp->v_lock, "vnode", lktimeout, lkflags);
	KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
	/* exclusive lock still held */

	/*
	 * Note: sysref needs to be activated to convert -0x40000000 to +1.
	 * The -0x40000000 comes from the last ref on reuse, and from
	 * sysref_init() on allocate.
	 */
	sysref_activate(&vp->v_sysref);
	vp->v_filesize = NOOFFSET;
	vp->v_type = VNON;
	vp->v_tag = 0;
	vp->v_ops = NULL;
	vp->v_data = NULL;
	KKASSERT(vp->v_mount == NULL);

	return (vp);
}
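
/*
 * Consumer sketch (editorial, hypothetical and simplified): a filesystem
 * creating a new in-core vnode obtains it VX locked and refd, then
 * initializes it ('my_inode' is illustrative):
 *
 *	vp = allocvnode(0, 0);
 *	vp->v_type = VREG;
 *	vp->v_data = my_inode;
 *	vp->v_ops = &mp->mnt_vn_use_ops;
 *	...associate with the mount, then unlock or convert the lock...
 */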
int
freesomevnodes(int n)
{
	struct vnode *vp;
	int count = 0;

	while (n) {
		--n;
		if ((vp = allocfreevnode()) == NULL)
			break;
		vx_put(vp);
		--numvnodes;
		++count;	/* count vnodes actually freed */
	}
	return(count);
}