/*
 * Copyright (c) 2004 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/vfs_lock.c,v 1.27 2007/05/13 04:34:47 dillon Exp $
 */

/*
 * External virtual filesystem routines
 */
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/sysctl.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_object.h>

#include <sys/buf2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

static void vnode_terminate(struct vnode *vp);
static boolean_t vnode_ctor(void *obj, void *private, int ocflags);
static void vnode_dtor(void *obj, void *private);

static MALLOC_DEFINE(M_VNODE, "vnodes", "vnode structures");
static struct sysref_class vnode_sysref_class = {
        .name =         "vnode",
        .mtype =        M_VNODE,
        .proto =        SYSREF_PROTO_VNODE,
        .offset =       offsetof(struct vnode, v_sysref),
        .objsize =      sizeof(struct vnode),
        .mag_capacity = 256,
        .flags =        SRC_MANAGEDINIT,
        .ctor =         vnode_ctor,
        .dtor =         vnode_dtor,
        .ops = {
                .terminate = (sysref_terminate_func_t)vnode_terminate
        }
};

static TAILQ_HEAD(freelst, vnode) vnode_free_list;     /* vnode free list */

int  freevnodes = 0;
SYSCTL_INT(_debug, OID_AUTO, freevnodes, CTLFLAG_RD,
                &freevnodes, 0, "");
static int wantfreevnodes = 25;
SYSCTL_INT(_debug, OID_AUTO, wantfreevnodes, CTLFLAG_RW,
                &wantfreevnodes, 0, "");
static int minvnodes;
SYSCTL_INT(_kern, OID_AUTO, minvnodes, CTLFLAG_RW,
                &minvnodes, 0, "Minimum number of vnodes");

/*
 * Called from vfsinit()
 */
void
vfs_lock_init(void)
{
        minvnodes = desiredvnodes / 4;

        TAILQ_INIT(&vnode_free_list);
}

/*
 * Inline helper functions.  vbusy() and vfree() must be called while in a
 * critical section.
 *
 * Warning: must be callable if the caller holds a read spinlock to something
 * else, meaning we can't use read spinlocks here.
 */
static __inline
void
__vbusy(struct vnode *vp)
{
        TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
        freevnodes--;
        vp->v_flag &= ~(VFREE|VAGE);
}

static __inline
void
__vfree(struct vnode *vp)
{
        if (vp->v_flag & (VAGE|VRECLAIMED))
                TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
        else
                TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
        freevnodes++;
        vp->v_flag &= ~VAGE;
        vp->v_flag |= VFREE;
}

static __inline
void
__vfreetail(struct vnode *vp)
{
        TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
        freevnodes++;
        vp->v_flag |= VFREE;
}

/*
 * Return a C boolean indicating whether we should put the vnode on the
 * freelist (VFREE), or leave it / mark it as VCACHED.
 *
 * This routine is only valid if the vnode is already either VFREE or
 * VCACHED, or if it can become VFREE or VCACHED via vnode_terminate().
 */
static __inline boolean_t
vshouldfree(struct vnode *vp)
{
        return (vp->v_auxrefs == 0 &&
            (vp->v_object == NULL || vp->v_object->resident_page_count == 0));
}

/*
 * Add a ref to an active vnode.  This function should never be called
 * with an inactive vnode (use vget() instead).
 */
void
vref(struct vnode *vp)
{
        KKASSERT(vp->v_sysref.refcnt > 0 &&
                 (vp->v_flag & (VFREE|VINACTIVE)) == 0);
        sysref_get(&vp->v_sysref);
}

/*
 * Release a ref on an active or inactive vnode.  The sysref termination
 * function will be called when the last active reference is released,
 * and the vnode is returned to the objcache when the last inactive
 * reference is released.
 */
void
vrele(struct vnode *vp)
{
        sysref_put(&vp->v_sysref);
}
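
/*
 * Usage sketch (illustrative only; my_blocking_op is a hypothetical
 * placeholder): a caller that already holds an active vnode, e.g. from
 * a prior vget(), can take an extra reference around a blocking
 * operation:
 *
 *      vref(vp);               (vp known active, add a reference)
 *      my_blocking_op(vp);     (vp cannot be destroyed meanwhile)
 *      vrele(vp);              (may trigger vnode_terminate() if last)
 */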

/*
 * Add an auxiliary data structure reference to the vnode.  Auxiliary
 * references do not change the state of the vnode or prevent it
 * from being deactivated, reclaimed, or placed on the free list.
 *
 * An auxiliary reference DOES prevent the vnode from being destroyed,
 * allowing you to vx_lock() it, test state, etc.
 *
 * An auxiliary reference DOES NOT move a vnode out of the VFREE state
 * once it has entered it.
 */
void
vhold(struct vnode *vp)
{
        KKASSERT(vp->v_sysref.refcnt != 0);
        atomic_add_int(&vp->v_auxrefs, 1);
}

/*
 * Remove an auxiliary reference from the vnode.
 *
 * vdrop needs to check for a VCACHED->VFREE transition to catch cases
 * where a vnode is held past its reclamation.
 */
void
vdrop(struct vnode *vp)
{
        KKASSERT(vp->v_sysref.refcnt != 0 && vp->v_auxrefs > 0);
        atomic_subtract_int(&vp->v_auxrefs, 1);
        if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
                vp->v_flag |= VAGE;
                vp->v_flag &= ~VCACHED;
                __vfree(vp);
        }
}
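
/*
 * Usage sketch (illustrative only): auxiliary references let a data
 * structure such as the namecache pin a vnode without keeping it
 * active, interlocking against destruction via the VX lock:
 *
 *      vhold(vp);              (vp can no longer be destroyed)
 *      vx_lock(vp);            (may block)
 *      ... test vp->v_flag, etc ...
 *      vx_unlock(vp);
 *      vdrop(vp);              (may move a VCACHED vnode to VFREE)
 */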

/*
 * This function is called when the last active reference on the vnode
 * is released, typically via vrele().  SYSREF will give the vnode a
 * negative ref count, indicating that it is undergoing termination or
 * is being set aside for the cache, and one final sysref_put() is
 * required to actually return it to the memory subsystem.
 *
 * However, because vnodes may have auxiliary structural references via
 * v_auxrefs, we must interlock auxiliary references against termination
 * via the VX lock mechanism.  It is possible for a vnode to be reactivated
 * while we were blocked on the lock.
 */
void
vnode_terminate(struct vnode *vp)
{
        vx_lock(vp);
        if (sysref_isinactive(&vp->v_sysref)) {
                /*
                 * Deactivate the vnode by marking it VFREE or VCACHED.
                 * The vnode can be reactivated from either state until
                 * reclaimed.  These states inherit the 'last' sysref on
                 * the vnode.
                 *
                 * NOTE: There may be additional inactive references from
                 * other entities blocking on the VX lock while we hold it,
                 * but this does not prevent us from changing the vnode's
                 * state.
                 *
                 * NOTE: The vnode could already be marked inactive.  XXX
                 * how?
                 *
                 * NOTE: The vnode may be marked inactive with dirty buffers
                 * or dirty pages in its cached VM object still present.
                 */
                if ((vp->v_flag & VINACTIVE) == 0) {
                        vp->v_flag |= VINACTIVE;
                        VOP_INACTIVE(vp);
                }
                KKASSERT((vp->v_flag & (VFREE|VCACHED)) == 0);
                if (vshouldfree(vp))
                        __vfree(vp);
                else
                        vp->v_flag |= VCACHED;
                vx_unlock(vp);
        } else {
                /*
                 * Someone reactivated the vnode while we were blocked on
                 * the VX lock.  Release the VX lock and release the (now
                 * active) last reference which is no longer last.
                 */
                vx_unlock(vp);
                vrele(vp);
        }
}
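
/*
 * Deactivation summary (descriptive): once the last active reference
 * is released the vnode ends up in one of two reactivatable states,
 * both of which inherit the final 'last' sysref:
 *
 *      active --(last vrele)--> vnode_terminate()
 *          --> VINACTIVE|VFREE    (no auxrefs, no resident pages)
 *          --> VINACTIVE|VCACHED  (otherwise)
 *
 * Either state can be reactivated by vget() until the vnode is
 * reclaimed or reused via allocfreevnode().
 */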

/*
 * Physical vnode constructor / destructor.  These are only executed on
 * the backend of the objcache.  They are NOT executed on every vnode
 * allocation or deallocation.
 */
boolean_t
vnode_ctor(void *obj, void *private, int ocflags)
{
        struct vnode *vp = obj;

        lwkt_token_init(&vp->v_pollinfo.vpi_token);
        lockinit(&vp->v_lock, "vnode", 0, 0);
        ccms_dataspace_init(&vp->v_ccms);
        TAILQ_INIT(&vp->v_namecache);
        return(TRUE);
}

void
vnode_dtor(void *obj, void *private)
{
        struct vnode *vp = obj;

        ccms_dataspace_destroy(&vp->v_ccms);
}

/****************************************************************
 *                      VX LOCKING FUNCTIONS                    *
 ****************************************************************
 *
 * These functions lock vnodes for reclamation and deactivation related
 * activities.  The caller must already be holding some sort of reference
 * on the vnode.
 */

void
vx_lock(struct vnode *vp)
{
        lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

static int
vx_lock_nonblock(struct vnode *vp)
{
        return(lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT));
}

void
vx_unlock(struct vnode *vp)
{
        lockmgr(&vp->v_lock, LK_RELEASE);
}

/****************************************************************
 *                  VNODE ACQUISITION FUNCTIONS                 *
 ****************************************************************
 *
 * These functions must be used when accessing a vnode via an auxiliary
 * reference such as the namecache or free list, or when you wish to
 * do a combo ref+lock sequence.
 *
 * These functions are MANDATORY for any code chain accessing a vnode
 * whose activation state is not known.
 *
 * vget()/vput() are used when reactivation is desired.
 *
 * vx_get() and vx_put() are used when reactivation is not desired.
 */
int
vget(struct vnode *vp, int flags)
{
        int error;

        /*
         * A lock type must be passed
         */
        if ((flags & LK_TYPE_MASK) == 0) {
                panic("vget() called with no lock specified!");
                /* NOT REACHED */
        }

        /*
         * Reference the structure and then acquire the lock.  0->1
         * transitions and refs during termination are allowed here so
         * call sysref directly.
         */
        sysref_get(&vp->v_sysref);
        if ((error = vn_lock(vp, flags)) != 0) {
                /*
                 * The lock failed, undo and return an error.
                 */
                sysref_put(&vp->v_sysref);
        } else if (vp->v_flag & VRECLAIMED) {
                /*
                 * The node is being reclaimed and cannot be reactivated
                 * any more, undo and return ENOENT.
                 */
                vn_unlock(vp);
                vrele(vp);
                error = ENOENT;
        } else {
                /*
                 * If the vnode is marked VFREE or VCACHED it needs to be
                 * reactivated, otherwise it had better already be active.
                 * VINACTIVE must also be cleared.
                 *
                 * In the VFREE/VCACHED case we have to throw away the
                 * sysref that was earmarking those cases and preventing
                 * the vnode from being destroyed.  Our sysref is still held.
                 */
                if (vp->v_flag & VFREE) {
                        __vbusy(vp);
                        sysref_put(&vp->v_sysref);
                        sysref_activate(&vp->v_sysref);
                } else if (vp->v_flag & VCACHED) {
                        vp->v_flag &= ~VCACHED;
                        sysref_put(&vp->v_sysref);
                        sysref_activate(&vp->v_sysref);
                } else {
                        KKASSERT(sysref_isactive(&vp->v_sysref));
                }
                vp->v_flag &= ~VINACTIVE;
                error = 0;
        }
        return(error);
}
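
/*
 * Usage sketch (illustrative only): reactivating a vnode reached
 * through an auxiliary reference such as a namecache entry.  The
 * LK_EXCLUSIVE flag is just an example; any valid lock type works:
 *
 *      if ((error = vget(vp, LK_EXCLUSIVE)) == 0) {
 *              ... vp is active, referenced, and locked ...
 *              vput(vp);       (unlock + vrele in one call)
 *      }
 *
 * An ENOENT return means the vnode was reclaimed out from under us
 * and can no longer be reactivated.
 */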

void
vput(struct vnode *vp)
{
        vn_unlock(vp);
        vrele(vp);
}

/*
 * XXX The vx_*() locks should use auxrefs, not the main reference counter.
 */
void
vx_get(struct vnode *vp)
{
        sysref_get(&vp->v_sysref);
        lockmgr(&vp->v_lock, LK_EXCLUSIVE);
}

int
vx_get_nonblock(struct vnode *vp)
{
        int error;

        sysref_get(&vp->v_sysref);
        error = lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT);
        if (error)
                sysref_put(&vp->v_sysref);
        return(error);
}

/*
 * Release a VX lock that also held a ref on the vnode.
 *
 * vx_put needs to check for a VCACHED->VFREE transition to catch the
 * case where e.g. vnlru issues a vgone*().
 */
void
vx_put(struct vnode *vp)
{
        if ((vp->v_flag & VCACHED) && vshouldfree(vp)) {
                vp->v_flag |= VAGE;
                vp->v_flag &= ~VCACHED;
                __vfree(vp);
        }
        lockmgr(&vp->v_lock, LK_RELEASE);
        sysref_put(&vp->v_sysref);
}
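
/*
 * Usage sketch (illustrative only): examining or reclaiming a vnode
 * without reactivating it, e.g. from a cleaning scan:
 *
 *      vx_get(vp);             (ref + VX lock, no reactivation)
 *      ... inspect or vgone_vxlocked(vp) ...
 *      vx_put(vp);             (may move a VCACHED vnode to VFREE)
 */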

/*
 * Misc functions
 */
void
vsetflags(struct vnode *vp, int flags)
{
        crit_enter();
        vp->v_flag |= flags;
        crit_exit();
}

void
vclrflags(struct vnode *vp, int flags)
{
        crit_enter();
        vp->v_flag &= ~flags;
        crit_exit();
}

/*
 * Try to reuse a vnode from the free list.  NOTE: The returned vnode
 * is not completely initialized.
 */
static
struct vnode *
allocfreevnode(void)
{
        struct vnode *vp;
        int count;

        for (count = 0; count < freevnodes; count++) {
                /*
                 * Note that regardless of how we block in this loop,
                 * we only get here if freevnodes != 0 so there
                 * had better be something on the list.
                 *
                 * Try to lock the first vnode on the free list.
                 * Cycle if we can't.
                 *
                 * XXX NOT MP SAFE
                 */
                vp = TAILQ_FIRST(&vnode_free_list);
                if (vx_lock_nonblock(vp)) {
                        KKASSERT(vp->v_flag & VFREE);
                        TAILQ_REMOVE(&vnode_free_list, vp, v_freelist);
                        TAILQ_INSERT_TAIL(&vnode_free_list,
                                          vp, v_freelist);
                        continue;
                }

                /*
                 * With the vnode locked we can safely remove it
                 * from the free list.  We inherit the reference
                 * that was previously associated with the vnode
                 * being on the free list.
                 */
                KKASSERT((vp->v_flag & (VFREE|VINACTIVE)) ==
                          (VFREE|VINACTIVE));
                KKASSERT(sysref_isinactive(&vp->v_sysref));
                __vbusy(vp);

                /*
                 * Holding the VX lock on an inactive vnode prevents it
                 * from being reactivated or reused.  New namecache
                 * associations can only be made using active vnodes.
                 *
                 * Another thread may be blocked on our vnode lock while
                 * holding a namecache lock.  We can only reuse this vnode
                 * if we can clear all namecache associations without
                 * blocking.
                 */
                if ((vp->v_flag & VRECLAIMED) == 0) {
                        if (cache_inval_vp_nonblock(vp)) {
                                __vfreetail(vp);
                                vx_unlock(vp);
                                continue;
                        }
                        vgone_vxlocked(vp);
                        /* vnode is still VX locked */
                }

                /*
                 * We can reuse the vnode if no primary or auxiliary
                 * references remain other than ours, else put it
                 * back on the free list and keep looking.
                 *
                 * Either the free list inherits the last reference
                 * or we fall through and sysref_activate() the last
                 * reference.
                 *
                 * Since the vnode is in a VRECLAIMED state, no new
                 * namecache associations could have been made.
                 */
                KKASSERT(TAILQ_EMPTY(&vp->v_namecache));
                if (vp->v_auxrefs ||
                    !sysref_islastdeactivation(&vp->v_sysref)) {
                        __vfreetail(vp);
                        vx_unlock(vp);
                        continue;
                }

                /*
                 * Return a VX locked vnode suitable for reuse.  The caller
                 * inherits the sysref.
                 */
                return(vp);
        }
        return(NULL);
}

/*
 * Obtain a new vnode from the freelist, allocating more if necessary.
 * The returned vnode is VX locked & refd.
 */
struct vnode *
allocvnode(int lktimeout, int lkflags)
{
        struct vnode *vp;

        /*
         * Try to reuse vnodes if we hit the max.  This situation only
         * occurs in certain large-memory (2G+) situations.  We cannot
         * attempt to directly reclaim vnodes due to nasty recursion
         * problems.
         */
        while (numvnodes - freevnodes > desiredvnodes)
                vnlru_proc_wait();

        /*
         * Attempt to reuse a vnode already on the free list, allocating
         * a new vnode if we can't find one or if we have not yet reached
         * a good minimum for LRU performance.
         */
        if (freevnodes >= wantfreevnodes && numvnodes >= minvnodes)
                vp = allocfreevnode();
        else
                vp = NULL;
        if (vp == NULL) {
                vp = sysref_alloc(&vnode_sysref_class);
                lockmgr(&vp->v_lock, LK_EXCLUSIVE);
                numvnodes++;
        }

        /*
         * We are using a managed sysref class, vnode fields are only
         * zeroed on initial allocation from the backing store, not
         * on reallocation.  Thus we have to clear these fields for both
         * reallocation and reuse.
         */
#ifdef INVARIANTS
        if (vp->v_data)
                panic("cleaned vnode isn't");
        if (vp->v_track_read.bk_active + vp->v_track_write.bk_active)
                panic("Clean vnode has pending I/O's");
        KKASSERT(vp->v_mount == NULL);
#endif
        vp->v_flag = 0;
        vp->v_lastw = 0;
        vp->v_lasta = 0;
        vp->v_cstart = 0;
        vp->v_clen = 0;
        vp->v_socket = 0;
        vp->v_opencount = 0;
        vp->v_writecount = 0;   /* XXX */
        lockreinit(&vp->v_lock, "vnode", lktimeout, lkflags);
        KKASSERT(TAILQ_FIRST(&vp->v_namecache) == NULL);
        /* exclusive lock still held */

        /*
         * Note: sysref needs to be activated to convert -0x40000000 to +1.
         * The -0x40000000 comes from the last ref on reuse, and from
         * sysref_init() on allocate.
         */
        sysref_activate(&vp->v_sysref);
        RB_INIT(&vp->v_rbclean_tree);
        RB_INIT(&vp->v_rbdirty_tree);
        RB_INIT(&vp->v_rbhash_tree);
        vp->v_filesize = NOOFFSET;
        vp->v_type = VNON;
        vp->v_tag = 0;
        vp->v_ops = NULL;
        vp->v_data = NULL;
        KKASSERT(vp->v_mount == NULL);

        return (vp);
}
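
/*
 * Usage sketch (illustrative only; VT_MYFS and my_inode are
 * hypothetical): a filesystem's inode-to-vnode path would complete
 * the initialization before exposing the vnode:
 *
 *      vp = allocvnode(lktimeout, lkflags);
 *      vp->v_type = VREG;
 *      vp->v_tag = VT_MYFS;            (filesystem identity)
 *      vp->v_data = my_inode;          (private per-fs data)
 *      ... attach v_ops, associate with the mount ...
 *      (vp is returned still VX locked and referenced)
 */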

int
freesomevnodes(int n)
{
        struct vnode *vp;
        int count = 0;

        while (n) {
                --n;
                if ((vp = allocfreevnode()) == NULL)
                        break;
                vx_put(vp);
                ++count;        /* one more vnode actually freed */
                --numvnodes;
        }
        return(count);
}