2 * Copyright (c) 2003,2004 The DragonFly Project. All rights reserved.
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34 * Copyright (c) 1989, 1993, 1995
35 * The Regents of the University of California. All rights reserved.
37 * This code is derived from software contributed to Berkeley by
38 * Poul-Henning Kamp of the FreeBSD Project.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the University of
51 * California, Berkeley and its contributors.
52 * 4. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
68 * @(#)vfs_cache.c 8.5 (Berkeley) 3/22/95
69 * $FreeBSD: src/sys/kern/vfs_cache.c,v 1.42.2.6 2001/10/05 20:07:03 dillon Exp $
70 * $DragonFly: src/sys/kern/vfs_cache.c,v 1.91 2008/06/14 05:34:06 dillon Exp $
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/kernel.h>
76 #include <sys/sysctl.h>
77 #include <sys/mount.h>
78 #include <sys/vnode.h>
79 #include <sys/malloc.h>
80 #include <sys/sysproto.h>
81 #include <sys/spinlock.h>
83 #include <sys/namei.h>
84 #include <sys/nlookup.h>
85 #include <sys/filedesc.h>
86 #include <sys/fnv_hash.h>
87 #include <sys/globaldata.h>
88 #include <sys/kern_syscall.h>
89 #include <sys/dirent.h>
92 #include <sys/sysref2.h>
93 #include <sys/spinlock2.h>
94 #include <sys/mplock2.h>
96 #define MAX_RECURSION_DEPTH 64
99 * Random lookups in the cache are accomplished with a hash table using
100 * a hash key of (nc_src_vp, name).
102 * Negative entries may exist and correspond to structures where nc_vp
103 * is NULL. In a negative entry, NCF_WHITEOUT will be set if the entry
104 * corresponds to a whited-out directory entry (verses simply not finding the
107 * Upon reaching the last segment of a path, if the reference is for DELETE,
108 * or NOCACHE is set (rewrite), and the name is located in the cache, it
113 * Structures associated with name cacheing.
115 #define NCHHASH(hash) (&nchashtbl[(hash) & nchash])
118 MALLOC_DEFINE(M_VFSCACHE
, "vfscache", "VFS name cache entries");
120 LIST_HEAD(nchash_list
, namecache
);
123 struct nchash_list list
;
124 struct spinlock spin
;
127 static struct nchash_head
*nchashtbl
;
128 static struct namecache_list ncneglist
;
129 static struct spinlock ncspin
;
130 struct lwkt_token vfs_token
;
133 * ncvp_debug - debug cache_fromvp(). This is used by the NFS server
134 * to create the namecache infrastructure leading to a dangling vnode.
136 * 0 Only errors are reported
137 * 1 Successes are reported
138 * 2 Successes + the whole directory scan is reported
139 * 3 Force the directory scan code run as if the parent vnode did not
140 * have a namecache record, even if it does have one.
142 static int ncvp_debug
;
143 SYSCTL_INT(_debug
, OID_AUTO
, ncvp_debug
, CTLFLAG_RW
, &ncvp_debug
, 0, "");
145 static u_long nchash
; /* size of hash table */
146 SYSCTL_ULONG(_debug
, OID_AUTO
, nchash
, CTLFLAG_RD
, &nchash
, 0, "");
148 static int ncnegfactor
= 16; /* ratio of negative entries */
149 SYSCTL_INT(_debug
, OID_AUTO
, ncnegfactor
, CTLFLAG_RW
, &ncnegfactor
, 0, "");
151 static int nclockwarn
; /* warn on locked entries in ticks */
152 SYSCTL_INT(_debug
, OID_AUTO
, nclockwarn
, CTLFLAG_RW
, &nclockwarn
, 0, "");
154 static int numneg
; /* number of cache entries allocated */
155 SYSCTL_INT(_debug
, OID_AUTO
, numneg
, CTLFLAG_RD
, &numneg
, 0, "");
157 static int numcache
; /* number of cache entries allocated */
158 SYSCTL_INT(_debug
, OID_AUTO
, numcache
, CTLFLAG_RD
, &numcache
, 0, "");
160 static int numunres
; /* number of unresolved entries */
161 SYSCTL_INT(_debug
, OID_AUTO
, numunres
, CTLFLAG_RD
, &numunres
, 0, "");
163 SYSCTL_INT(_debug
, OID_AUTO
, vnsize
, CTLFLAG_RD
, 0, sizeof(struct vnode
), "");
164 SYSCTL_INT(_debug
, OID_AUTO
, ncsize
, CTLFLAG_RD
, 0, sizeof(struct namecache
), "");
166 static int cache_resolve_mp(struct mount
*mp
);
167 static struct vnode
*cache_dvpref(struct namecache
*ncp
);
168 static void _cache_rehash(struct namecache
*ncp
);
169 static void _cache_lock(struct namecache
*ncp
);
170 static void _cache_setunresolved(struct namecache
*ncp
);
173 * The new name cache statistics
175 SYSCTL_NODE(_vfs
, OID_AUTO
, cache
, CTLFLAG_RW
, 0, "Name cache statistics");
176 #define STATNODE(mode, name, var) \
177 SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
178 STATNODE(CTLFLAG_RD
, numneg
, &numneg
);
179 STATNODE(CTLFLAG_RD
, numcache
, &numcache
);
180 static u_long numcalls
; STATNODE(CTLFLAG_RD
, numcalls
, &numcalls
);
181 static u_long dothits
; STATNODE(CTLFLAG_RD
, dothits
, &dothits
);
182 static u_long dotdothits
; STATNODE(CTLFLAG_RD
, dotdothits
, &dotdothits
);
183 static u_long numchecks
; STATNODE(CTLFLAG_RD
, numchecks
, &numchecks
);
184 static u_long nummiss
; STATNODE(CTLFLAG_RD
, nummiss
, &nummiss
);
185 static u_long nummisszap
; STATNODE(CTLFLAG_RD
, nummisszap
, &nummisszap
);
186 static u_long numposzaps
; STATNODE(CTLFLAG_RD
, numposzaps
, &numposzaps
);
187 static u_long numposhits
; STATNODE(CTLFLAG_RD
, numposhits
, &numposhits
);
188 static u_long numnegzaps
; STATNODE(CTLFLAG_RD
, numnegzaps
, &numnegzaps
);
189 static u_long numneghits
; STATNODE(CTLFLAG_RD
, numneghits
, &numneghits
);
191 struct nchstats nchstats
[SMP_MAXCPU
];
193 * Export VFS cache effectiveness statistics to user-land.
195 * The statistics are left for aggregation to user-land so
196 * neat things can be achieved, like observing per-CPU cache
200 sysctl_nchstats(SYSCTL_HANDLER_ARGS
)
202 struct globaldata
*gd
;
206 for (i
= 0; i
< ncpus
; ++i
) {
207 gd
= globaldata_find(i
);
208 if ((error
= SYSCTL_OUT(req
, (void *)&(*gd
->gd_nchstats
),
209 sizeof(struct nchstats
))))
215 SYSCTL_PROC(_vfs_cache
, OID_AUTO
, nchstats
, CTLTYPE_OPAQUE
|CTLFLAG_RD
,
216 0, 0, sysctl_nchstats
, "S,nchstats", "VFS cache effectiveness statistics");
218 static struct namecache
*cache_zap(struct namecache
*ncp
);
221 * Namespace locking. The caller must already hold a reference to the
222 * namecache structure in order to lock/unlock it. This function prevents
223 * the namespace from being created or destroyed by accessors other then
226 * Note that holding a locked namecache structure prevents other threads
227 * from making namespace changes (e.g. deleting or creating), prevents
228 * vnode association state changes by other threads, and prevents the
229 * namecache entry from being resolved or unresolved by other threads.
231 * The lock owner has full authority to associate/disassociate vnodes
232 * and resolve/unresolve the locked ncp.
234 * WARNING! Holding a locked ncp will prevent a vnode from being destroyed
235 * or recycled, but it does NOT help you if the vnode had already
236 * initiated a recyclement. If this is important, use cache_get()
237 * rather then cache_lock() (and deal with the differences in the
238 * way the refs counter is handled). Or, alternatively, make an
239 * unconditional call to cache_validate() or cache_resolve()
240 * after cache_lock() returns.
244 _cache_lock(struct namecache
*ncp
)
251 KKASSERT(ncp
->nc_refs
!= 0);
256 xtd
= ncp
->nc_locktd
;
263 if (atomic_cmpset_ptr(&ncp
->nc_locktd
, NULL
, td
)) {
264 KKASSERT(ncp
->nc_exlocks
== 0);
268 * The vp associated with a locked ncp must
269 * be held to prevent it from being recycled.
271 * WARNING! If VRECLAIMED is set the vnode
272 * could already be in the middle of a recycle.
273 * Callers must use cache_vref() or
274 * cache_vget() on the locked ncp to
275 * validate the vp or set the cache entry
279 vhold(ncp
->nc_vp
); /* MPSAFE */
286 * Memory interlock (XXX)
289 tsleep_interlock(ncp
, 0);
291 if (xtd
!= ncp
->nc_locktd
)
293 error
= tsleep(ncp
, PINTERLOCKED
, "clock", nclockwarn
);
294 if (error
== EWOULDBLOCK
) {
298 kprintf("[diagnostic] cache_lock: blocked on %p", ncp
);
299 kprintf(" \"%*.*s\"\n",
300 ncp
->nc_nlen
, ncp
->nc_nlen
, ncp
->nc_name
);
305 kprintf("[diagnostic] cache_lock: unblocked %*.*s\n",
306 ncp
->nc_nlen
, ncp
->nc_nlen
, ncp
->nc_name
);
312 _cache_lock_nonblock(struct namecache
*ncp
)
317 KKASSERT(ncp
->nc_refs
!= 0);
321 xtd
= ncp
->nc_locktd
;
328 if (atomic_cmpset_ptr(&ncp
->nc_locktd
, NULL
, td
)) {
329 KKASSERT(ncp
->nc_exlocks
== 0);
333 * The vp associated with a locked ncp must
334 * be held to prevent it from being recycled.
336 * WARNING! If VRECLAIMED is set the vnode
337 * could already be in the middle of a recycle.
338 * Callers must use cache_vref() or
339 * cache_vget() on the locked ncp to
340 * validate the vp or set the cache entry
344 vhold(ncp
->nc_vp
); /* MPSAFE */
357 * NOTE: nc_refs can be 0 (degenerate case during _cache_drop).
361 _cache_unlock(struct namecache
*ncp
)
363 thread_t td __debugvar
= curthread
;
365 KKASSERT(ncp
->nc_refs
>= 0);
366 KKASSERT(ncp
->nc_exlocks
> 0);
367 KKASSERT(ncp
->nc_locktd
== td
);
369 if (--ncp
->nc_exlocks
== 0) {
372 ncp
->nc_locktd
= NULL
;
374 if (ncp
->nc_lockreq
) {
383 * cache_hold() and cache_drop() prevent the premature deletion of a
384 * namecache entry but do not prevent operations (such as zapping) on
385 * that namecache entry.
387 * This routine may only be called from outside this source module if
388 * nc_refs is already at least 1.
390 * This is a rare case where callers are allowed to hold a spinlock,
391 * so we can't ourselves.
397 _cache_hold(struct namecache
*ncp
)
399 atomic_add_int(&ncp
->nc_refs
, 1);
404 * Drop a cache entry, taking care to deal with races.
406 * For potential 1->0 transitions we must hold the ncp lock to safely
407 * test its flags. An unresolved entry with no children must be zapped
410 * The call to cache_zap() itself will handle all remaining races and
411 * will decrement the ncp's refs regardless. If we are resolved or
412 * have children nc_refs can safely be dropped to 0 without having to
415 * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion.
417 * NOTE: cache_zap() may return a non-NULL referenced parent which must
418 * be dropped in a loop.
422 _cache_drop(struct namecache
*ncp
)
427 KKASSERT(ncp
->nc_refs
> 0);
431 if (_cache_lock_nonblock(ncp
) == 0) {
432 if ((ncp
->nc_flag
& NCF_UNRESOLVED
) &&
433 TAILQ_EMPTY(&ncp
->nc_list
)) {
434 ncp
= cache_zap(ncp
);
437 if (atomic_cmpset_int(&ncp
->nc_refs
, 1, 0)) {
444 if (atomic_cmpset_int(&ncp
->nc_refs
, refs
, refs
- 1))
451 * Link a new namecache entry to its parent. Be careful to avoid races
452 * if vhold() blocks in the future.
454 * MPSAFE - ncp must be locked and vfs_token must be held.
457 _cache_link_parent(struct namecache
*ncp
, struct namecache
*par
)
459 KKASSERT(ncp
->nc_parent
== NULL
);
460 ncp
->nc_parent
= par
;
461 if (TAILQ_EMPTY(&par
->nc_list
)) {
462 TAILQ_INSERT_HEAD(&par
->nc_list
, ncp
, nc_entry
);
464 * Any vp associated with an ncp which has children must
465 * be held to prevent it from being recycled.
468 vhold(par
->nc_vp
); /* MPSAFE */
470 TAILQ_INSERT_HEAD(&par
->nc_list
, ncp
, nc_entry
);
475 * Remove the parent association from a namecache structure. If this is
476 * the last child of the parent the cache_drop(par) will attempt to
477 * recursively zap the parent.
479 * MPSAFE - ncp must be locked and vfs_token must be held.
482 _cache_unlink_parent(struct namecache
*ncp
)
484 struct namecache
*par
;
485 struct vnode
*dropvp
;
487 if ((par
= ncp
->nc_parent
) != NULL
) {
488 ncp
->nc_parent
= NULL
;
490 TAILQ_REMOVE(&par
->nc_list
, ncp
, nc_entry
);
492 if (par
->nc_vp
&& TAILQ_EMPTY(&par
->nc_list
))
497 * We can only safely vdrop with no spinlocks held.
505 * Allocate a new namecache structure. Most of the code does not require
506 * zero-termination of the string but it makes vop_compat_ncreate() easier.
508 static struct namecache
*
509 cache_alloc(int nlen
)
511 struct namecache
*ncp
;
513 ncp
= kmalloc(sizeof(*ncp
), M_VFSCACHE
, M_WAITOK
|M_ZERO
);
515 ncp
->nc_name
= kmalloc(nlen
+ 1, M_VFSCACHE
, M_WAITOK
);
517 ncp
->nc_flag
= NCF_UNRESOLVED
;
518 ncp
->nc_error
= ENOTCONN
; /* needs to be resolved */
521 TAILQ_INIT(&ncp
->nc_list
);
527 * Can only be called for the case where the ncp has never been
528 * associated with anything (so no spinlocks are needed).
531 _cache_free(struct namecache
*ncp
)
533 KKASSERT(ncp
->nc_refs
== 1 && ncp
->nc_exlocks
== 1);
535 kfree(ncp
->nc_name
, M_VFSCACHE
);
536 kfree(ncp
, M_VFSCACHE
);
540 cache_zero(struct nchandle
*nch
)
547 * Ref and deref a namecache structure.
549 * Warning: caller may hold an unrelated read spinlock, which means we can't
550 * use read spinlocks here.
555 cache_hold(struct nchandle
*nch
)
557 _cache_hold(nch
->ncp
);
558 atomic_add_int(&nch
->mount
->mnt_refs
, 1);
563 * Create a copy of a namecache handle for an already-referenced
569 cache_copy(struct nchandle
*nch
, struct nchandle
*target
)
573 _cache_hold(target
->ncp
);
574 atomic_add_int(&nch
->mount
->mnt_refs
, 1);
581 cache_changemount(struct nchandle
*nch
, struct mount
*mp
)
583 atomic_add_int(&nch
->mount
->mnt_refs
, -1);
585 atomic_add_int(&nch
->mount
->mnt_refs
, 1);
589 cache_drop(struct nchandle
*nch
)
591 atomic_add_int(&nch
->mount
->mnt_refs
, -1);
592 _cache_drop(nch
->ncp
);
598 cache_lock(struct nchandle
*nch
)
600 _cache_lock(nch
->ncp
);
604 cache_lock_nonblock(struct nchandle
*nch
)
606 return(_cache_lock_nonblock(nch
->ncp
));
611 cache_unlock(struct nchandle
*nch
)
613 _cache_unlock(nch
->ncp
);
617 * ref-and-lock, unlock-and-deref functions.
619 * This function is primarily used by nlookup. Even though cache_lock
620 * holds the vnode, it is possible that the vnode may have already
621 * initiated a recyclement.
623 * We want cache_get() to return a definitively usable vnode or a
624 * definitively unresolved ncp.
628 _cache_get(struct namecache
*ncp
)
632 if (ncp
->nc_vp
&& (ncp
->nc_vp
->v_flag
& VRECLAIMED
))
633 _cache_setunresolved(ncp
);
638 * This is a special form of _cache_get() which only succeeds if
639 * it can get a pristine, non-recursive lock. The caller must have
640 * already ref'd the ncp.
642 * On success the ncp will be locked, on failure it will not. The
643 * ref count does not change either way.
645 * We want _cache_get_nonblock() (on success) to return a definitively
646 * usable vnode or a definitively unresolved ncp.
649 _cache_get_nonblock(struct namecache
*ncp
)
651 if (_cache_lock_nonblock(ncp
) == 0) {
652 if (ncp
->nc_exlocks
== 1) {
653 if (ncp
->nc_vp
&& (ncp
->nc_vp
->v_flag
& VRECLAIMED
))
654 _cache_setunresolved(ncp
);
664 * NOTE: The same nchandle can be passed for both arguments.
667 cache_get(struct nchandle
*nch
, struct nchandle
*target
)
669 KKASSERT(nch
->ncp
->nc_refs
> 0);
670 target
->mount
= nch
->mount
;
671 target
->ncp
= _cache_get(nch
->ncp
);
672 atomic_add_int(&target
->mount
->mnt_refs
, 1);
677 cache_get_nonblock(struct nchandle
*nch
)
681 if ((error
= _cache_get_nonblock(nch
->ncp
)) == 0)
682 atomic_add_int(&nch
->mount
->mnt_refs
, 1);
689 _cache_put(struct namecache
*ncp
)
696 cache_put(struct nchandle
*nch
)
698 atomic_add_int(&nch
->mount
->mnt_refs
, -1);
699 _cache_put(nch
->ncp
);
705 * Resolve an unresolved ncp by associating a vnode with it. If the
706 * vnode is NULL, a negative cache entry is created.
708 * The ncp should be locked on entry and will remain locked on return.
712 _cache_setvp(struct mount
*mp
, struct namecache
*ncp
, struct vnode
*vp
)
714 KKASSERT(ncp
->nc_flag
& NCF_UNRESOLVED
);
717 * Any vp associated with an ncp which has children must
718 * be held. Any vp associated with a locked ncp must be held.
720 if (!TAILQ_EMPTY(&ncp
->nc_list
))
722 spin_lock_wr(&vp
->v_spinlock
);
724 TAILQ_INSERT_HEAD(&vp
->v_namecache
, ncp
, nc_vnode
);
725 spin_unlock_wr(&vp
->v_spinlock
);
730 * Set auxiliary flags
734 ncp
->nc_flag
|= NCF_ISDIR
;
737 ncp
->nc_flag
|= NCF_ISSYMLINK
;
738 /* XXX cache the contents of the symlink */
743 atomic_add_int(&numcache
, 1);
747 * When creating a negative cache hit we set the
748 * namecache_gen. A later resolve will clean out the
749 * negative cache hit if the mount point's namecache_gen
750 * has changed. Used by devfs, could also be used by
754 spin_lock_wr(&ncspin
);
755 lwkt_token_init(&vfs_token
);
756 TAILQ_INSERT_TAIL(&ncneglist
, ncp
, nc_vnode
);
758 spin_unlock_wr(&ncspin
);
759 ncp
->nc_error
= ENOENT
;
761 ncp
->nc_namecache_gen
= mp
->mnt_namecache_gen
;
763 ncp
->nc_flag
&= ~NCF_UNRESOLVED
;
767 cache_setvp(struct nchandle
*nch
, struct vnode
*vp
)
769 _cache_setvp(nch
->mount
, nch
->ncp
, vp
);
773 cache_settimeout(struct nchandle
*nch
, int nticks
)
775 struct namecache
*ncp
= nch
->ncp
;
777 if ((ncp
->nc_timeout
= ticks
+ nticks
) == 0)
782 * Disassociate the vnode or negative-cache association and mark a
783 * namecache entry as unresolved again. Note that the ncp is still
784 * left in the hash table and still linked to its parent.
786 * The ncp should be locked and refd on entry and will remain locked and refd
789 * This routine is normally never called on a directory containing children.
790 * However, NFS often does just that in its rename() code as a cop-out to
791 * avoid complex namespace operations. This disconnects a directory vnode
792 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
797 _cache_setunresolved(struct namecache
*ncp
)
801 if ((ncp
->nc_flag
& NCF_UNRESOLVED
) == 0) {
802 ncp
->nc_flag
|= NCF_UNRESOLVED
;
804 ncp
->nc_error
= ENOTCONN
;
805 atomic_add_int(&numunres
, 1);
806 if ((vp
= ncp
->nc_vp
) != NULL
) {
807 atomic_add_int(&numcache
, -1);
808 spin_lock_wr(&vp
->v_spinlock
);
810 TAILQ_REMOVE(&vp
->v_namecache
, ncp
, nc_vnode
);
811 spin_unlock_wr(&vp
->v_spinlock
);
814 * Any vp associated with an ncp with children is
815 * held by that ncp. Any vp associated with a locked
816 * ncp is held by that ncp. These conditions must be
817 * undone when the vp is cleared out from the ncp.
819 if (!TAILQ_EMPTY(&ncp
->nc_list
))
824 spin_lock_wr(&ncspin
);
825 TAILQ_REMOVE(&ncneglist
, ncp
, nc_vnode
);
827 spin_unlock_wr(&ncspin
);
829 ncp
->nc_flag
&= ~(NCF_WHITEOUT
|NCF_ISDIR
|NCF_ISSYMLINK
);
834 * The cache_nresolve() code calls this function to automatically
835 * set a resolved cache element to unresolved if it has timed out
836 * or if it is a negative cache hit and the mount point namecache_gen
840 _cache_auto_unresolve(struct mount
*mp
, struct namecache
*ncp
)
843 * Already in an unresolved state, nothing to do.
845 if (ncp
->nc_flag
& NCF_UNRESOLVED
)
849 * Try to zap entries that have timed out. We have
850 * to be careful here because locked leafs may depend
851 * on the vnode remaining intact in a parent, so only
852 * do this under very specific conditions.
854 if (ncp
->nc_timeout
&& (int)(ncp
->nc_timeout
- ticks
) < 0 &&
855 TAILQ_EMPTY(&ncp
->nc_list
)) {
856 _cache_setunresolved(ncp
);
861 * If a resolved negative cache hit is invalid due to
862 * the mount's namecache generation being bumped, zap it.
864 if (ncp
->nc_vp
== NULL
&&
865 ncp
->nc_namecache_gen
!= mp
->mnt_namecache_gen
) {
866 _cache_setunresolved(ncp
);
872 cache_setunresolved(struct nchandle
*nch
)
874 _cache_setunresolved(nch
->ncp
);
878 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
879 * looking for matches. This flag tells the lookup code when it must
880 * check for a mount linkage and also prevents the directories in question
881 * from being deleted or renamed.
885 cache_clrmountpt_callback(struct mount
*mp
, void *data
)
887 struct nchandle
*nch
= data
;
889 if (mp
->mnt_ncmounton
.ncp
== nch
->ncp
)
891 if (mp
->mnt_ncmountpt
.ncp
== nch
->ncp
)
897 cache_clrmountpt(struct nchandle
*nch
)
901 count
= mountlist_scan(cache_clrmountpt_callback
, nch
,
902 MNTSCAN_FORWARD
|MNTSCAN_NOBUSY
);
904 nch
->ncp
->nc_flag
&= ~NCF_ISMOUNTPT
;
908 * Invalidate portions of the namecache topology given a starting entry.
909 * The passed ncp is set to an unresolved state and:
911 * The passed ncp must be locked.
913 * CINV_DESTROY - Set a flag in the passed ncp entry indicating
914 * that the physical underlying nodes have been
915 * destroyed... as in deleted. For example, when
916 * a directory is removed. This will cause record
917 * lookups on the name to no longer be able to find
918 * the record and tells the resolver to return failure
919 * rather then trying to resolve through the parent.
921 * The topology itself, including ncp->nc_name,
924 * This only applies to the passed ncp, if CINV_CHILDREN
925 * is specified the children are not flagged.
927 * CINV_CHILDREN - Set all children (recursively) to an unresolved
930 * Note that this will also have the side effect of
931 * cleaning out any unreferenced nodes in the topology
932 * from the leaves up as the recursion backs out.
934 * Note that the topology for any referenced nodes remains intact.
936 * It is possible for cache_inval() to race a cache_resolve(), meaning that
937 * the namecache entry may not actually be invalidated on return if it was
938 * revalidated while recursing down into its children. This code guarentees
939 * that the node(s) will go through an invalidation cycle, but does not
940 * guarentee that they will remain in an invalidated state.
942 * Returns non-zero if a revalidation was detected during the invalidation
943 * recursion, zero otherwise. Note that since only the original ncp is
944 * locked the revalidation ultimately can only indicate that the original ncp
945 * *MIGHT* no have been reresolved.
947 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
948 * have to avoid blowing out the kernel stack. We do this by saving the
949 * deep namecache node and aborting the recursion, then re-recursing at that
950 * node using a depth-first algorithm in order to allow multiple deep
951 * recursions to chain through each other, then we restart the invalidation
956 struct namecache
*resume_ncp
;
960 static int _cache_inval_internal(struct namecache
*, int, struct cinvtrack
*);
964 _cache_inval(struct namecache
*ncp
, int flags
)
966 struct cinvtrack track
;
967 struct namecache
*ncp2
;
971 track
.resume_ncp
= NULL
;
974 r
= _cache_inval_internal(ncp
, flags
, &track
);
975 if (track
.resume_ncp
== NULL
)
977 kprintf("Warning: deep namecache recursion at %s\n",
980 while ((ncp2
= track
.resume_ncp
) != NULL
) {
981 track
.resume_ncp
= NULL
;
983 _cache_inval_internal(ncp2
, flags
& ~CINV_DESTROY
,
993 cache_inval(struct nchandle
*nch
, int flags
)
995 return(_cache_inval(nch
->ncp
, flags
));
999 _cache_inval_internal(struct namecache
*ncp
, int flags
, struct cinvtrack
*track
)
1001 struct namecache
*kid
;
1002 struct namecache
*nextkid
;
1006 KKASSERT(ncp
->nc_exlocks
);
1008 _cache_setunresolved(ncp
);
1009 lwkt_gettoken(&nlock
, &vfs_token
);
1010 if (flags
& CINV_DESTROY
)
1011 ncp
->nc_flag
|= NCF_DESTROYED
;
1012 if ((flags
& CINV_CHILDREN
) &&
1013 (kid
= TAILQ_FIRST(&ncp
->nc_list
)) != NULL
1016 if (++track
->depth
> MAX_RECURSION_DEPTH
) {
1017 track
->resume_ncp
= ncp
;
1023 if (track
->resume_ncp
) {
1027 if ((nextkid
= TAILQ_NEXT(kid
, nc_entry
)) != NULL
)
1028 _cache_hold(nextkid
);
1029 if ((kid
->nc_flag
& NCF_UNRESOLVED
) == 0 ||
1030 TAILQ_FIRST(&kid
->nc_list
)
1033 rcnt
+= _cache_inval_internal(kid
, flags
& ~CINV_DESTROY
, track
);
1042 lwkt_reltoken(&nlock
);
1045 * Someone could have gotten in there while ncp was unlocked,
1048 if ((ncp
->nc_flag
& NCF_UNRESOLVED
) == 0)
1054 * Invalidate a vnode's namecache associations. To avoid races against
1055 * the resolver we do not invalidate a node which we previously invalidated
1056 * but which was then re-resolved while we were in the invalidation loop.
1058 * Returns non-zero if any namecache entries remain after the invalidation
1061 * NOTE: Unlike the namecache topology which guarentees that ncp's will not
1062 * be ripped out of the topology while held, the vnode's v_namecache
1063 * list has no such restriction. NCP's can be ripped out of the list
1064 * at virtually any time if not locked, even if held.
1066 * In addition, the v_namecache list itself must be locked via
1067 * the vnode's spinlock.
1070 cache_inval_vp(struct vnode
*vp
, int flags
)
1072 struct namecache
*ncp
;
1073 struct namecache
*next
;
1076 spin_lock_wr(&vp
->v_spinlock
);
1077 ncp
= TAILQ_FIRST(&vp
->v_namecache
);
1081 /* loop entered with ncp held and vp spin-locked */
1082 if ((next
= TAILQ_NEXT(ncp
, nc_vnode
)) != NULL
)
1084 spin_unlock_wr(&vp
->v_spinlock
);
1086 if (ncp
->nc_vp
!= vp
) {
1087 kprintf("Warning: cache_inval_vp: race-A detected on "
1088 "%s\n", ncp
->nc_name
);
1094 _cache_inval(ncp
, flags
);
1095 _cache_put(ncp
); /* also releases reference */
1097 if (ncp
&& ncp
->nc_vp
!= vp
) {
1098 kprintf("Warning: cache_inval_vp: race-B detected on "
1099 "%s\n", ncp
->nc_name
);
1103 spin_lock_wr(&vp
->v_spinlock
);
1105 spin_unlock_wr(&vp
->v_spinlock
);
1106 return(TAILQ_FIRST(&vp
->v_namecache
) != NULL
);
1110 * This routine is used instead of the normal cache_inval_vp() when we
1111 * are trying to recycle otherwise good vnodes.
1113 * Return 0 on success, non-zero if not all namecache records could be
1114 * disassociated from the vnode (for various reasons).
1117 cache_inval_vp_nonblock(struct vnode
*vp
)
1119 struct namecache
*ncp
;
1120 struct namecache
*next
;
1122 spin_lock_wr(&vp
->v_spinlock
);
1123 ncp
= TAILQ_FIRST(&vp
->v_namecache
);
1127 /* loop entered with ncp held */
1128 if ((next
= TAILQ_NEXT(ncp
, nc_vnode
)) != NULL
)
1130 spin_unlock_wr(&vp
->v_spinlock
);
1131 if (_cache_lock_nonblock(ncp
)) {
1137 if (ncp
->nc_vp
!= vp
) {
1138 kprintf("Warning: cache_inval_vp: race-A detected on "
1139 "%s\n", ncp
->nc_name
);
1145 _cache_inval(ncp
, 0);
1146 _cache_put(ncp
); /* also releases reference */
1148 if (ncp
&& ncp
->nc_vp
!= vp
) {
1149 kprintf("Warning: cache_inval_vp: race-B detected on "
1150 "%s\n", ncp
->nc_name
);
1154 spin_lock_wr(&vp
->v_spinlock
);
1156 spin_unlock_wr(&vp
->v_spinlock
);
1157 return(TAILQ_FIRST(&vp
->v_namecache
) != NULL
);
1161 * The source ncp has been renamed to the target ncp. Both fncp and tncp
1162 * must be locked. The target ncp is destroyed (as a normal rename-over
1163 * would destroy the target file or directory).
1165 * Because there may be references to the source ncp we cannot copy its
1166 * contents to the target. Instead the source ncp is relinked as the target
1167 * and the target ncp is removed from the namecache topology.
1170 cache_rename(struct nchandle
*fnch
, struct nchandle
*tnch
)
1172 struct namecache
*fncp
= fnch
->ncp
;
1173 struct namecache
*tncp
= tnch
->ncp
;
1177 lwkt_gettoken(&nlock
, &vfs_token
);
1178 _cache_setunresolved(tncp
);
1179 _cache_unlink_parent(fncp
);
1180 _cache_link_parent(fncp
, tncp
->nc_parent
);
1181 _cache_unlink_parent(tncp
);
1182 oname
= fncp
->nc_name
;
1183 fncp
->nc_name
= tncp
->nc_name
;
1184 fncp
->nc_nlen
= tncp
->nc_nlen
;
1185 tncp
->nc_name
= NULL
;
1188 _cache_rehash(fncp
);
1190 _cache_rehash(tncp
);
1191 lwkt_reltoken(&nlock
);
1194 kfree(oname
, M_VFSCACHE
);
1198 * vget the vnode associated with the namecache entry. Resolve the namecache
1199 * entry if necessary and deal with namecache/vp races. The passed ncp must
1200 * be referenced and may be locked. The ncp's ref/locking state is not
1201 * effected by this call.
1203 * lk_type may be LK_SHARED, LK_EXCLUSIVE. A ref'd, possibly locked
1204 * (depending on the passed lk_type) will be returned in *vpp with an error
1205 * of 0, or NULL will be returned in *vpp with a non-0 error code. The
1206 * most typical error is ENOENT, meaning that the ncp represents a negative
1207 * cache hit and there is no vnode to retrieve, but other errors can occur
1210 * The main race we have to deal with are namecache zaps. The ncp itself
1211 * will not disappear since it is referenced, and it turns out that the
1212 * validity of the vp pointer can be checked simply by rechecking the
1213 * contents of ncp->nc_vp.
1216 cache_vget(struct nchandle
*nch
, struct ucred
*cred
,
1217 int lk_type
, struct vnode
**vpp
)
1219 struct namecache
*ncp
;
1226 if (ncp
->nc_flag
& NCF_UNRESOLVED
) {
1228 error
= cache_resolve(nch
, cred
);
1233 if (error
== 0 && (vp
= ncp
->nc_vp
) != NULL
) {
1235 * Accessing the vnode from the namecache is a bit
1236 * dangerous. Because there are no refs on the vnode, it
1237 * could be in the middle of a reclaim.
1239 if (vp
->v_flag
& VRECLAIMED
) {
1240 kprintf("Warning: vnode reclaim race detected in cache_vget on %p (%s)\n", vp
, ncp
->nc_name
);
1242 _cache_setunresolved(ncp
);
1246 error
= vget(vp
, lk_type
);
1248 if (vp
!= ncp
->nc_vp
)
1251 } else if (vp
!= ncp
->nc_vp
) {
1254 } else if (vp
->v_flag
& VRECLAIMED
) {
1255 panic("vget succeeded on a VRECLAIMED node! vp %p", vp
);
1258 if (error
== 0 && vp
== NULL
)
1265 cache_vref(struct nchandle
*nch
, struct ucred
*cred
, struct vnode
**vpp
)
1267 struct namecache
*ncp
;
1275 if (ncp
->nc_flag
& NCF_UNRESOLVED
) {
1277 error
= cache_resolve(nch
, cred
);
1282 if (error
== 0 && (vp
= ncp
->nc_vp
) != NULL
) {
1284 * Since we did not obtain any locks, a cache zap
1285 * race can occur here if the vnode is in the middle
1286 * of being reclaimed and has not yet been able to
1287 * clean out its cache node. If that case occurs,
1288 * we must lock and unresolve the cache, then loop
1291 if ((error
= vget(vp
, LK_SHARED
)) != 0) {
1292 if (error
== ENOENT
) {
1293 kprintf("Warning: vnode reclaim race detected on cache_vref %p (%s)\n", vp
, ncp
->nc_name
);
1295 _cache_setunresolved(ncp
);
1301 /* caller does not want a lock */
1305 if (error
== 0 && vp
== NULL
)
1312 * Return a referenced vnode representing the parent directory of
1315 * Because the caller has locked the ncp it should not be possible for
1316 * the parent ncp to go away. However, the parent can unresolve its
1317 * dvp at any time so we must be able to acquire a lock on the parent
1318 * to safely access nc_vp.
1320 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
1321 * so use vhold()/vdrop() while holding the lock to prevent dvp from
1322 * getting destroyed.
1324 static struct vnode
*
1325 cache_dvpref(struct namecache
*ncp
)
1327 struct namecache
*par
;
1331 if ((par
= ncp
->nc_parent
) != NULL
) {
1333 if (_cache_lock_nonblock(par
) == 0) {
1334 if ((par
->nc_flag
& NCF_UNRESOLVED
) == 0) {
1335 if ((dvp
= par
->nc_vp
) != NULL
)
1340 if (vget(dvp
, LK_SHARED
) == 0) {
1343 /* return refd, unlocked dvp */
1356 * Convert a directory vnode to a namecache record without any other
1357 * knowledge of the topology. This ONLY works with directory vnodes and
1358 * is ONLY used by the NFS server. dvp must be refd but unlocked, and the
1359 * returned ncp (if not NULL) will be held and unlocked.
1361 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
1362 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
1363 * for dvp. This will fail only if the directory has been deleted out from
1366 * Callers must always check for a NULL return no matter the value of 'makeit'.
1368 * To avoid underflowing the kernel stack each recursive call increments
1369 * the makeit variable.
1372 static int cache_inefficient_scan(struct nchandle
*nch
, struct ucred
*cred
,
1373 struct vnode
*dvp
, char *fakename
);
1374 static int cache_fromdvp_try(struct vnode
*dvp
, struct ucred
*cred
,
1375 struct vnode
**saved_dvp
);
1378 cache_fromdvp(struct vnode
*dvp
, struct ucred
*cred
, int makeit
,
1379 struct nchandle
*nch
)
1381 struct vnode
*saved_dvp
;
1387 nch
->mount
= dvp
->v_mount
;
1392 * Loop until resolution, inside code will break out on error.
1396 * Break out if we successfully acquire a working ncp.
1398 spin_lock_wr(&dvp
->v_spinlock
);
1399 nch
->ncp
= TAILQ_FIRST(&dvp
->v_namecache
);
1402 spin_unlock_wr(&dvp
->v_spinlock
);
1405 spin_unlock_wr(&dvp
->v_spinlock
);
1408 * If dvp is the root of its filesystem it should already
1409 * have a namecache pointer associated with it as a side
1410 * effect of the mount, but it may have been disassociated.
1412 if (dvp
->v_flag
& VROOT
) {
1413 nch
->ncp
= _cache_get(nch
->mount
->mnt_ncmountpt
.ncp
);
1414 error
= cache_resolve_mp(nch
->mount
);
1415 _cache_put(nch
->ncp
);
1417 kprintf("cache_fromdvp: resolve root of mount %p error %d",
1418 dvp
->v_mount
, error
);
1422 kprintf(" failed\n");
1427 kprintf(" succeeded\n");
1432 * If we are recursed too deeply resort to an O(n^2)
1433 * algorithm to resolve the namecache topology. The
1434 * resolved pvp is left referenced in saved_dvp to
1435 * prevent the tree from being destroyed while we loop.
1438 error
= cache_fromdvp_try(dvp
, cred
, &saved_dvp
);
1440 kprintf("lookupdotdot(longpath) failed %d "
1441 "dvp %p\n", error
, dvp
);
1449 * Get the parent directory and resolve its ncp.
1452 kfree(fakename
, M_TEMP
);
1455 error
= vop_nlookupdotdot(*dvp
->v_ops
, dvp
, &pvp
, cred
,
1458 kprintf("lookupdotdot failed %d dvp %p\n", error
, dvp
);
1464 * Reuse makeit as a recursion depth counter. On success
1465 * nch will be fully referenced.
1467 cache_fromdvp(pvp
, cred
, makeit
+ 1, nch
);
1469 if (nch
->ncp
== NULL
)
1473 * Do an inefficient scan of pvp (embodied by ncp) to look
1474 * for dvp. This will create a namecache record for dvp on
1475 * success. We loop up to recheck on success.
1477 * ncp and dvp are both held but not locked.
1479 error
= cache_inefficient_scan(nch
, cred
, dvp
, fakename
);
1481 kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
1482 pvp
, nch
->ncp
->nc_name
, dvp
);
1484 /* nch was NULLed out, reload mount */
1485 nch
->mount
= dvp
->v_mount
;
1489 kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
1490 pvp
, nch
->ncp
->nc_name
);
1493 /* nch was NULLed out, reload mount */
1494 nch
->mount
= dvp
->v_mount
;
1498 * If nch->ncp is non-NULL it will have been held already.
1501 kfree(fakename
, M_TEMP
);
1510 * Go up the chain of parent directories until we find something
1511 * we can resolve into the namecache. This is very inefficient.
1515 cache_fromdvp_try(struct vnode
*dvp
, struct ucred
*cred
,
1516 struct vnode
**saved_dvp
)
1518 struct nchandle nch
;
1521 static time_t last_fromdvp_report
;
1525 * Loop getting the parent directory vnode until we get something we
1526 * can resolve in the namecache.
1529 nch
.mount
= dvp
->v_mount
;
1535 kfree(fakename
, M_TEMP
);
1538 error
= vop_nlookupdotdot(*dvp
->v_ops
, dvp
, &pvp
, cred
,
1545 spin_lock_wr(&pvp
->v_spinlock
);
1546 if ((nch
.ncp
= TAILQ_FIRST(&pvp
->v_namecache
)) != NULL
) {
1547 _cache_hold(nch
.ncp
);
1548 spin_unlock_wr(&pvp
->v_spinlock
);
1552 spin_unlock_wr(&pvp
->v_spinlock
);
1553 if (pvp
->v_flag
& VROOT
) {
1554 nch
.ncp
= _cache_get(pvp
->v_mount
->mnt_ncmountpt
.ncp
);
1555 error
= cache_resolve_mp(nch
.mount
);
1556 _cache_unlock(nch
.ncp
);
1559 _cache_drop(nch
.ncp
);
1569 if (last_fromdvp_report
!= time_second
) {
1570 last_fromdvp_report
= time_second
;
1571 kprintf("Warning: extremely inefficient path "
1572 "resolution on %s\n",
1575 error
= cache_inefficient_scan(&nch
, cred
, dvp
, fakename
);
1578 * Hopefully dvp now has a namecache record associated with
1579 * it. Leave it referenced to prevent the kernel from
1580 * recycling the vnode. Otherwise extremely long directory
1581 * paths could result in endless recycling.
1586 _cache_drop(nch
.ncp
);
1589 kfree(fakename
, M_TEMP
);
1594 * Do an inefficient scan of the directory represented by ncp looking for
1595 * the directory vnode dvp. ncp must be held but not locked on entry and
1596 * will be held on return. dvp must be refd but not locked on entry and
1597 * will remain refd on return.
1599 * Why do this at all? Well, due to its stateless nature the NFS server
1600 * converts file handles directly to vnodes without necessarily going through
1601 * the namecache ops that would otherwise create the namecache topology
1602 * leading to the vnode. We could either (1) Change the namecache algorithms
1603 * to allow disconnect namecache records that are re-merged opportunistically,
1604 * or (2) Make the NFS server backtrack and scan to recover a connected
1605 * namecache topology in order to then be able to issue new API lookups.
1607 * It turns out that (1) is a huge mess. It takes a nice clean set of
1608 * namecache algorithms and introduces a lot of complication in every subsystem
1609 * that calls into the namecache to deal with the re-merge case, especially
1610 * since we are using the namecache to placehold negative lookups and the
1611 * vnode might not be immediately assigned. (2) is certainly far less
1612 * efficient then (1), but since we are only talking about directories here
1613 * (which are likely to remain cached), the case does not actually run all
1614 * that often and has the supreme advantage of not polluting the namecache
1617 * If a fakename is supplied just construct a namecache entry using the
1621 cache_inefficient_scan(struct nchandle
*nch
, struct ucred
*cred
,
1622 struct vnode
*dvp
, char *fakename
)
1624 struct nlcomponent nlc
;
1625 struct nchandle rncp
;
1637 vat
.va_blocksize
= 0;
1638 if ((error
= VOP_GETATTR(dvp
, &vat
)) != 0)
1640 if ((error
= cache_vref(nch
, cred
, &pvp
)) != 0)
1643 kprintf("inefficient_scan: directory iosize %ld "
1644 "vattr fileid = %lld\n",
1646 (long long)vat
.va_fileid
);
1650 * Use the supplied fakename if not NULL. Fake names are typically
1651 * not in the actual filesystem hierarchy. This is used by HAMMER
1652 * to glue @@timestamp recursions together.
1655 nlc
.nlc_nameptr
= fakename
;
1656 nlc
.nlc_namelen
= strlen(fakename
);
1657 rncp
= cache_nlookup(nch
, &nlc
);
1661 if ((blksize
= vat
.va_blocksize
) == 0)
1662 blksize
= DEV_BSIZE
;
1663 rbuf
= kmalloc(blksize
, M_TEMP
, M_WAITOK
);
1669 iov
.iov_base
= rbuf
;
1670 iov
.iov_len
= blksize
;
1673 uio
.uio_resid
= blksize
;
1674 uio
.uio_segflg
= UIO_SYSSPACE
;
1675 uio
.uio_rw
= UIO_READ
;
1676 uio
.uio_td
= curthread
;
1678 if (ncvp_debug
>= 2)
1679 kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio
.uio_offset
);
1680 error
= VOP_READDIR(pvp
, &uio
, cred
, &eofflag
, NULL
, NULL
);
1682 den
= (struct dirent
*)rbuf
;
1683 bytes
= blksize
- uio
.uio_resid
;
1686 if (ncvp_debug
>= 2) {
1687 kprintf("cache_inefficient_scan: %*.*s\n",
1688 den
->d_namlen
, den
->d_namlen
,
1691 if (den
->d_type
!= DT_WHT
&&
1692 den
->d_ino
== vat
.va_fileid
) {
1694 kprintf("cache_inefficient_scan: "
1695 "MATCHED inode %lld path %s/%*.*s\n",
1696 (long long)vat
.va_fileid
,
1698 den
->d_namlen
, den
->d_namlen
,
1701 nlc
.nlc_nameptr
= den
->d_name
;
1702 nlc
.nlc_namelen
= den
->d_namlen
;
1703 rncp
= cache_nlookup(nch
, &nlc
);
1704 KKASSERT(rncp
.ncp
!= NULL
);
1707 bytes
-= _DIRENT_DIRSIZ(den
);
1708 den
= _DIRENT_NEXT(den
);
1710 if (rncp
.ncp
== NULL
&& eofflag
== 0 && uio
.uio_resid
!= blksize
)
1713 kfree(rbuf
, M_TEMP
);
1717 if (rncp
.ncp
->nc_flag
& NCF_UNRESOLVED
) {
1718 _cache_setvp(rncp
.mount
, rncp
.ncp
, dvp
);
1719 if (ncvp_debug
>= 2) {
1720 kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
1721 nch
->ncp
->nc_name
, rncp
.ncp
->nc_name
, dvp
);
1724 if (ncvp_debug
>= 2) {
1725 kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
1726 nch
->ncp
->nc_name
, rncp
.ncp
->nc_name
, dvp
,
1730 if (rncp
.ncp
->nc_vp
== NULL
)
1731 error
= rncp
.ncp
->nc_error
;
1733 * Release rncp after a successful nlookup. rncp was fully
1738 kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
1739 dvp
, nch
->ncp
->nc_name
);
1746 * Zap a namecache entry. The ncp is unconditionally set to an unresolved
1747 * state, which disassociates it from its vnode or ncneglist.
1749 * Then, if there are no additional references to the ncp and no children,
1750 * the ncp is removed from the topology and destroyed.
1752 * References and/or children may exist if the ncp is in the middle of the
1753 * topology, preventing the ncp from being destroyed.
1755 * This function must be called with the ncp held and locked and will unlock
1756 * and drop it during zapping.
1758 * This function may returned a held (but NOT locked) parent node which the
1759 * caller must drop. We do this so _cache_drop() can loop, to avoid
1760 * blowing out the kernel stack.
1762 * WARNING! For MPSAFE operation this routine must acquire up to three
1763 * spin locks to be able to safely test nc_refs. Lock order is
1766 * hash spinlock if on hash list
1767 * parent spinlock if child of parent
1768 * (the ncp is unresolved so there is no vnode association)
1770 static struct namecache
*
1771 cache_zap(struct namecache
*ncp
)
1773 struct namecache
*par
;
1774 struct spinlock
*hspin
;
1775 struct vnode
*dropvp
;
1780 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
1782 _cache_setunresolved(ncp
);
1785 * Try to scrap the entry and possibly tail-recurse on its parent.
1786 * We only scrap unref'd (other then our ref) unresolved entries,
1787 * we do not scrap 'live' entries.
1789 * Note that once the spinlocks are acquired if nc_refs == 1 no
1790 * other references are possible. If it isn't, however, we have
1791 * to decrement but also be sure to avoid a 1->0 transition.
1793 KKASSERT(ncp
->nc_flag
& NCF_UNRESOLVED
);
1794 KKASSERT(ncp
->nc_refs
> 0);
1799 lwkt_gettoken(&nlock
, &vfs_token
);
1802 hspin
= &ncp
->nc_head
->spin
;
1803 spin_lock_wr(hspin
);
1807 * If someone other then us has a ref or we have children
1808 * we cannot zap the entry. The 1->0 transition and any
1809 * further list operation is protected by the spinlocks
1810 * we have acquired but other transitions are not.
1813 refs
= ncp
->nc_refs
;
1814 if (refs
== 1 && TAILQ_EMPTY(&ncp
->nc_list
))
1816 if (atomic_cmpset_int(&ncp
->nc_refs
, refs
, refs
- 1)) {
1818 spin_unlock_wr(hspin
);
1819 lwkt_reltoken(&nlock
);
1826 * We are the only ref and with the spinlocks held no further
1827 * refs can be acquired by others.
1829 * Remove us from the hash list and parent list. We have to
1830 * drop a ref on the parent's vp if the parent's list becomes
1834 LIST_REMOVE(ncp
, nc_hash
);
1835 ncp
->nc_head
= NULL
;
1838 if ((par
= ncp
->nc_parent
) != NULL
) {
1839 par
= _cache_hold(par
);
1840 TAILQ_REMOVE(&par
->nc_list
, ncp
, nc_entry
);
1841 ncp
->nc_parent
= NULL
;
1843 if (par
->nc_vp
&& TAILQ_EMPTY(&par
->nc_list
))
1844 dropvp
= par
->nc_vp
;
1848 * ncp should not have picked up any refs. Physically
1852 spin_unlock_wr(hspin
);
1853 lwkt_reltoken(&nlock
);
1854 KKASSERT(ncp
->nc_refs
== 1);
1855 atomic_add_int(&numunres
, -1);
1856 /* _cache_unlock(ncp) not required */
1857 ncp
->nc_refs
= -1; /* safety */
1859 kfree(ncp
->nc_name
, M_VFSCACHE
);
1860 kfree(ncp
, M_VFSCACHE
);
1863 * Delayed drop (we had to release our spinlocks)
1865 * The refed parent (if not NULL) must be dropped. The
1866 * caller is responsible for looping.
1873 static enum { CHI_LOW
, CHI_HIGH
} cache_hysteresis_state
= CHI_LOW
;
1877 _cache_hysteresis(void)
1880 * Don't cache too many negative hits. We use hysteresis to reduce
1881 * the impact on the critical path.
1883 switch(cache_hysteresis_state
) {
1885 if (numneg
> MINNEG
&& numneg
* ncnegfactor
> numcache
) {
1887 cache_hysteresis_state
= CHI_HIGH
;
1891 if (numneg
> MINNEG
* 9 / 10 &&
1892 numneg
* ncnegfactor
* 9 / 10 > numcache
1896 cache_hysteresis_state
= CHI_LOW
;
1903 * NEW NAMECACHE LOOKUP API
1905 * Lookup an entry in the cache. A locked, referenced, non-NULL
1906 * entry is *always* returned, even if the supplied component is illegal.
1907 * The resulting namecache entry should be returned to the system with
1908 * cache_put() or _cache_unlock() + cache_drop().
1910 * namecache locks are recursive but care must be taken to avoid lock order
1913 * Nobody else will be able to manipulate the associated namespace (e.g.
1914 * create, delete, rename, rename-target) until the caller unlocks the
1917 * The returned entry will be in one of three states: positive hit (non-null
1918 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
1919 * Unresolved entries must be resolved through the filesystem to associate the
1920 * vnode and/or determine whether a positive or negative hit has occured.
1922 * It is not necessary to lock a directory in order to lock namespace under
1923 * that directory. In fact, it is explicitly not allowed to do that. A
1924 * directory is typically only locked when being created, renamed, or
1927 * The directory (par) may be unresolved, in which case any returned child
1928 * will likely also be marked unresolved. Likely but not guarenteed. Since
1929 * the filesystem lookup requires a resolved directory vnode the caller is
1930 * responsible for resolving the namecache chain top-down. This API
1931 * specifically allows whole chains to be created in an unresolved state.
1934 cache_nlookup(struct nchandle
*par_nch
, struct nlcomponent
*nlc
)
1936 struct nchandle nch
;
1937 struct namecache
*ncp
;
1938 struct namecache
*new_ncp
;
1939 struct nchash_head
*nchpp
;
1947 mp
= par_nch
->mount
;
1950 * Try to locate an existing entry
1952 hash
= fnv_32_buf(nlc
->nlc_nameptr
, nlc
->nlc_namelen
, FNV1_32_INIT
);
1953 hash
= fnv_32_buf(&par_nch
->ncp
, sizeof(par_nch
->ncp
), hash
);
1955 nchpp
= NCHHASH(hash
);
1957 spin_lock_wr(&nchpp
->spin
);
1958 LIST_FOREACH(ncp
, &nchpp
->list
, nc_hash
) {
1962 * Break out if we find a matching entry. Note that
1963 * UNRESOLVED entries may match, but DESTROYED entries
1966 if (ncp
->nc_parent
== par_nch
->ncp
&&
1967 ncp
->nc_nlen
== nlc
->nlc_namelen
&&
1968 bcmp(ncp
->nc_name
, nlc
->nlc_nameptr
, ncp
->nc_nlen
) == 0 &&
1969 (ncp
->nc_flag
& NCF_DESTROYED
) == 0
1972 spin_unlock_wr(&nchpp
->spin
);
1973 if (_cache_get_nonblock(ncp
) == 0) {
1974 _cache_auto_unresolve(mp
, ncp
);
1976 _cache_free(new_ncp
);
1985 spin_unlock_wr(&nchpp
->spin
);
1988 * We failed to locate an entry, create a new entry and add it to
1989 * the cache. We have to relookup after possibly blocking in
1992 if (new_ncp
== NULL
) {
1993 new_ncp
= cache_alloc(nlc
->nlc_namelen
);
2000 * Initialize as a new UNRESOLVED entry, lock (non-blocking),
2001 * and link to the parent. The mount point is usually inherited
2002 * from the parent unless this is a special case such as a mount
2003 * point where nlc_namelen is 0. If nlc_namelen is 0 nc_name will
2006 if (nlc
->nlc_namelen
) {
2007 bcopy(nlc
->nlc_nameptr
, ncp
->nc_name
, nlc
->nlc_namelen
);
2008 ncp
->nc_name
[nlc
->nlc_namelen
] = 0;
2010 nchpp
= NCHHASH(hash
); /* compiler optimization */
2011 spin_lock_wr(&nchpp
->spin
);
2012 LIST_INSERT_HEAD(&nchpp
->list
, ncp
, nc_hash
);
2013 ncp
->nc_head
= nchpp
;
2014 spin_unlock_wr(&nchpp
->spin
);
2015 lwkt_gettoken(&nlock
, &vfs_token
);
2016 _cache_link_parent(ncp
, par_nch
->ncp
);
2017 lwkt_reltoken(&nlock
);
2020 * stats and namecache size management
2022 if (ncp
->nc_flag
& NCF_UNRESOLVED
)
2023 ++gd
->gd_nchstats
->ncs_miss
;
2024 else if (ncp
->nc_vp
)
2025 ++gd
->gd_nchstats
->ncs_goodhits
;
2027 ++gd
->gd_nchstats
->ncs_neghits
;
2028 _cache_hysteresis();
2031 atomic_add_int(&nch
.mount
->mnt_refs
, 1);
2036 * The namecache entry is marked as being used as a mount point.
2037 * Locate the mount if it is visible to the caller.
2039 struct findmount_info
{
2040 struct mount
*result
;
2041 struct mount
*nch_mount
;
2042 struct namecache
*nch_ncp
;
2047 cache_findmount_callback(struct mount
*mp
, void *data
)
2049 struct findmount_info
*info
= data
;
2052 * Check the mount's mounted-on point against the passed nch.
2054 if (mp
->mnt_ncmounton
.mount
== info
->nch_mount
&&
2055 mp
->mnt_ncmounton
.ncp
== info
->nch_ncp
2064 cache_findmount(struct nchandle
*nch
)
2066 struct findmount_info info
;
2069 info
.nch_mount
= nch
->mount
;
2070 info
.nch_ncp
= nch
->ncp
;
2071 mountlist_scan(cache_findmount_callback
, &info
,
2072 MNTSCAN_FORWARD
|MNTSCAN_NOBUSY
);
2073 return(info
.result
);
2077 * Resolve an unresolved namecache entry, generally by looking it up.
2078 * The passed ncp must be locked and refd.
2080 * Theoretically since a vnode cannot be recycled while held, and since
2081 * the nc_parent chain holds its vnode as long as children exist, the
2082 * direct parent of the cache entry we are trying to resolve should
2083 * have a valid vnode. If not then generate an error that we can
2084 * determine is related to a resolver bug.
2086 * However, if a vnode was in the middle of a recyclement when the NCP
2087 * got locked, ncp->nc_vp might point to a vnode that is about to become
2088 * invalid. cache_resolve() handles this case by unresolving the entry
2089 * and then re-resolving it.
2091 * Note that successful resolution does not necessarily return an error
2092 * code of 0. If the ncp resolves to a negative cache hit then ENOENT
2096 cache_resolve(struct nchandle
*nch
, struct ucred
*cred
)
2098 struct namecache
*par
;
2099 struct namecache
*ncp
;
2100 struct nchandle nctmp
;
2109 * If the ncp is already resolved we have nothing to do. However,
2110 * we do want to guarentee that a usable vnode is returned when
2111 * a vnode is present, so make sure it hasn't been reclaimed.
2113 if ((ncp
->nc_flag
& NCF_UNRESOLVED
) == 0) {
2114 if (ncp
->nc_vp
&& (ncp
->nc_vp
->v_flag
& VRECLAIMED
))
2115 _cache_setunresolved(ncp
);
2116 if ((ncp
->nc_flag
& NCF_UNRESOLVED
) == 0)
2117 return (ncp
->nc_error
);
2121 * Mount points need special handling because the parent does not
2122 * belong to the same filesystem as the ncp.
2124 if (ncp
== mp
->mnt_ncmountpt
.ncp
)
2125 return (cache_resolve_mp(mp
));
2128 * We expect an unbroken chain of ncps to at least the mount point,
2129 * and even all the way to root (but this code doesn't have to go
2130 * past the mount point).
2132 if (ncp
->nc_parent
== NULL
) {
2133 kprintf("EXDEV case 1 %p %*.*s\n", ncp
,
2134 ncp
->nc_nlen
, ncp
->nc_nlen
, ncp
->nc_name
);
2135 ncp
->nc_error
= EXDEV
;
2136 return(ncp
->nc_error
);
2140 * The vp's of the parent directories in the chain are held via vhold()
2141 * due to the existance of the child, and should not disappear.
2142 * However, there are cases where they can disappear:
2144 * - due to filesystem I/O errors.
2145 * - due to NFS being stupid about tracking the namespace and
2146 * destroys the namespace for entire directories quite often.
2147 * - due to forced unmounts.
2148 * - due to an rmdir (parent will be marked DESTROYED)
2150 * When this occurs we have to track the chain backwards and resolve
2151 * it, looping until the resolver catches up to the current node. We
2152 * could recurse here but we might run ourselves out of kernel stack
2153 * so we do it in a more painful manner. This situation really should
2154 * not occur all that often, or if it does not have to go back too
2155 * many nodes to resolve the ncp.
2157 while ((dvp
= cache_dvpref(ncp
)) == NULL
) {
2159 * This case can occur if a process is CD'd into a
2160 * directory which is then rmdir'd. If the parent is marked
2161 * destroyed there is no point trying to resolve it.
2163 if (ncp
->nc_parent
->nc_flag
& NCF_DESTROYED
)
2166 par
= ncp
->nc_parent
;
2167 while (par
->nc_parent
&& par
->nc_parent
->nc_vp
== NULL
)
2168 par
= par
->nc_parent
;
2169 if (par
->nc_parent
== NULL
) {
2170 kprintf("EXDEV case 2 %*.*s\n",
2171 par
->nc_nlen
, par
->nc_nlen
, par
->nc_name
);
2174 kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
2175 par
->nc_nlen
, par
->nc_nlen
, par
->nc_name
);
2177 * The parent is not set in stone, ref and lock it to prevent
2178 * it from disappearing. Also note that due to renames it
2179 * is possible for our ncp to move and for par to no longer
2180 * be one of its parents. We resolve it anyway, the loop
2181 * will handle any moves.
2184 if (par
== nch
->mount
->mnt_ncmountpt
.ncp
) {
2185 cache_resolve_mp(nch
->mount
);
2186 } else if ((dvp
= cache_dvpref(par
)) == NULL
) {
2187 kprintf("[diagnostic] cache_resolve: raced on %*.*s\n", par
->nc_nlen
, par
->nc_nlen
, par
->nc_name
);
2191 if (par
->nc_flag
& NCF_UNRESOLVED
) {
2194 par
->nc_error
= VOP_NRESOLVE(&nctmp
, dvp
, cred
);
2198 if ((error
= par
->nc_error
) != 0) {
2199 if (par
->nc_error
!= EAGAIN
) {
2200 kprintf("EXDEV case 3 %*.*s error %d\n",
2201 par
->nc_nlen
, par
->nc_nlen
, par
->nc_name
,
2206 kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
2207 par
, par
->nc_nlen
, par
->nc_nlen
, par
->nc_name
);
2214 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
2215 * ncp's and reattach them. If this occurs the original ncp is marked
2216 * EAGAIN to force a relookup.
2218 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
2219 * ncp must already be resolved.
2224 ncp
->nc_error
= VOP_NRESOLVE(&nctmp
, dvp
, cred
);
2227 ncp
->nc_error
= EPERM
;
2229 if (ncp
->nc_error
== EAGAIN
) {
2230 kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
2231 ncp
, ncp
->nc_nlen
, ncp
->nc_nlen
, ncp
->nc_name
);
2234 return(ncp
->nc_error
);
2238 * Resolve the ncp associated with a mount point. Such ncp's almost always
2239 * remain resolved and this routine is rarely called. NFS MPs tends to force
2240 * re-resolution more often due to its mac-truck-smash-the-namecache
2241 * method of tracking namespace changes.
2243 * The semantics for this call is that the passed ncp must be locked on
2244 * entry and will be locked on return. However, if we actually have to
2245 * resolve the mount point we temporarily unlock the entry in order to
2246 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of
2247 * the unlock we have to recheck the flags after we relock.
2250 cache_resolve_mp(struct mount
*mp
)
2252 struct namecache
*ncp
= mp
->mnt_ncmountpt
.ncp
;
2256 KKASSERT(mp
!= NULL
);
2259 * If the ncp is already resolved we have nothing to do. However,
2260 * we do want to guarentee that a usable vnode is returned when
2261 * a vnode is present, so make sure it hasn't been reclaimed.
2263 if ((ncp
->nc_flag
& NCF_UNRESOLVED
) == 0) {
2264 if (ncp
->nc_vp
&& (ncp
->nc_vp
->v_flag
& VRECLAIMED
))
2265 _cache_setunresolved(ncp
);
2268 if (ncp
->nc_flag
& NCF_UNRESOLVED
) {
2270 while (vfs_busy(mp
, 0))
2272 error
= VFS_ROOT(mp
, &vp
);
2276 * recheck the ncp state after relocking.
2278 if (ncp
->nc_flag
& NCF_UNRESOLVED
) {
2279 ncp
->nc_error
= error
;
2281 _cache_setvp(mp
, ncp
, vp
);
2284 kprintf("[diagnostic] cache_resolve_mp: failed"
2285 " to resolve mount %p err=%d ncp=%p\n",
2287 _cache_setvp(mp
, ncp
, NULL
);
2289 } else if (error
== 0) {
2294 return(ncp
->nc_error
);
2301 cache_cleanneg(int count
)
2303 struct namecache
*ncp
;
2306 * Automode from the vnlru proc - clean out 10% of the negative cache
2310 count
= numneg
/ 10 + 1;
2313 * Attempt to clean out the specified number of negative cache
2317 spin_lock_wr(&ncspin
);
2318 ncp
= TAILQ_FIRST(&ncneglist
);
2320 spin_unlock_wr(&ncspin
);
2323 TAILQ_REMOVE(&ncneglist
, ncp
, nc_vnode
);
2324 TAILQ_INSERT_TAIL(&ncneglist
, ncp
, nc_vnode
);
2326 spin_unlock_wr(&ncspin
);
2327 if (_cache_get_nonblock(ncp
) == 0) {
2328 ncp
= cache_zap(ncp
);
2339 * Rehash a ncp. Rehashing is typically required if the name changes (should
2340 * not generally occur) or the parent link changes. This function will
2341 * unhash the ncp if the ncp is no longer hashable.
2344 _cache_rehash(struct namecache
*ncp
)
2346 struct nchash_head
*nchpp
;
2349 if ((nchpp
= ncp
->nc_head
) != NULL
) {
2350 spin_lock_wr(&nchpp
->spin
);
2351 LIST_REMOVE(ncp
, nc_hash
);
2352 ncp
->nc_head
= NULL
;
2353 spin_unlock_wr(&nchpp
->spin
);
2355 if (ncp
->nc_nlen
&& ncp
->nc_parent
) {
2356 hash
= fnv_32_buf(ncp
->nc_name
, ncp
->nc_nlen
, FNV1_32_INIT
);
2357 hash
= fnv_32_buf(&ncp
->nc_parent
,
2358 sizeof(ncp
->nc_parent
), hash
);
2359 nchpp
= NCHHASH(hash
);
2360 spin_lock_wr(&nchpp
->spin
);
2361 LIST_INSERT_HEAD(&nchpp
->list
, ncp
, nc_hash
);
2362 ncp
->nc_head
= nchpp
;
2363 spin_unlock_wr(&nchpp
->spin
);
2368 * Name cache initialization, from vfsinit() when we are booting
2376 /* initialise per-cpu namecache effectiveness statistics. */
2377 for (i
= 0; i
< ncpus
; ++i
) {
2378 gd
= globaldata_find(i
);
2379 gd
->gd_nchstats
= &nchstats
[i
];
2381 TAILQ_INIT(&ncneglist
);
2383 nchashtbl
= hashinit_ext(desiredvnodes
*2, sizeof(struct nchash_head
),
2384 M_VFSCACHE
, &nchash
);
2385 for (i
= 0; i
<= (int)nchash
; ++i
) {
2386 LIST_INIT(&nchashtbl
[i
].list
);
2387 spin_init(&nchashtbl
[i
].spin
);
2389 nclockwarn
= 5 * hz
;
2393 * Called from start_init() to bootstrap the root filesystem. Returns
2394 * a referenced, unlocked namecache record.
2397 cache_allocroot(struct nchandle
*nch
, struct mount
*mp
, struct vnode
*vp
)
2399 nch
->ncp
= cache_alloc(0);
2401 atomic_add_int(&mp
->mnt_refs
, 1);
2403 _cache_setvp(nch
->mount
, nch
->ncp
, vp
);
2407 * vfs_cache_setroot()
2409 * Create an association between the root of our namecache and
2410 * the root vnode. This routine may be called several times during
2413 * If the caller intends to save the returned namecache pointer somewhere
2414 * it must cache_hold() it.
2417 vfs_cache_setroot(struct vnode
*nvp
, struct nchandle
*nch
)
2420 struct nchandle onch
;
2428 cache_zero(&rootnch
);
2436 * XXX OLD API COMPAT FUNCTION. This really messes up the new namecache
2437 * topology and is being removed as quickly as possible. The new VOP_N*()
2438 * API calls are required to make specific adjustments using the supplied
2439 * ncp pointers rather then just bogusly purging random vnodes.
2441 * Invalidate all namecache entries to a particular vnode as well as
2442 * any direct children of that vnode in the namecache. This is a
2443 * 'catch all' purge used by filesystems that do not know any better.
2445 * Note that the linkage between the vnode and its namecache entries will
2446 * be removed, but the namecache entries themselves might stay put due to
2447 * active references from elsewhere in the system or due to the existance of
2448 * the children. The namecache topology is left intact even if we do not
2449 * know what the vnode association is. Such entries will be marked
2453 cache_purge(struct vnode
*vp
)
2455 cache_inval_vp(vp
, CINV_DESTROY
| CINV_CHILDREN
);
2459 * Flush all entries referencing a particular filesystem.
2461 * Since we need to check it anyway, we will flush all the invalid
2462 * entries at the same time.
2467 cache_purgevfs(struct mount
*mp
)
2469 struct nchash_head
*nchpp
;
2470 struct namecache
*ncp
, *nnp
;
2473 * Scan hash tables for applicable entries.
2475 for (nchpp
= &nchashtbl
[nchash
]; nchpp
>= nchashtbl
; nchpp
--) {
2476 spin_lock_wr(&nchpp
->spin
); XXX
2477 ncp
= LIST_FIRST(&nchpp
->list
);
2481 nnp
= LIST_NEXT(ncp
, nc_hash
);
2484 if (ncp
->nc_mount
== mp
) {
2486 ncp
= cache_zap(ncp
);
2494 spin_unlock_wr(&nchpp
->spin
); XXX
2500 static int disablecwd
;
2501 SYSCTL_INT(_debug
, OID_AUTO
, disablecwd
, CTLFLAG_RW
, &disablecwd
, 0, "");
2503 static u_long numcwdcalls
; STATNODE(CTLFLAG_RD
, numcwdcalls
, &numcwdcalls
);
2504 static u_long numcwdfail1
; STATNODE(CTLFLAG_RD
, numcwdfail1
, &numcwdfail1
);
2505 static u_long numcwdfail2
; STATNODE(CTLFLAG_RD
, numcwdfail2
, &numcwdfail2
);
2506 static u_long numcwdfail3
; STATNODE(CTLFLAG_RD
, numcwdfail3
, &numcwdfail3
);
2507 static u_long numcwdfail4
; STATNODE(CTLFLAG_RD
, numcwdfail4
, &numcwdfail4
);
2508 static u_long numcwdfound
; STATNODE(CTLFLAG_RD
, numcwdfound
, &numcwdfound
);
2514 sys___getcwd(struct __getcwd_args
*uap
)
2524 buflen
= uap
->buflen
;
2527 if (buflen
> MAXPATHLEN
)
2528 buflen
= MAXPATHLEN
;
2530 buf
= kmalloc(buflen
, M_TEMP
, M_WAITOK
);
2532 bp
= kern_getcwd(buf
, buflen
, &error
);
2535 error
= copyout(bp
, uap
->buf
, strlen(bp
) + 1);
2541 kern_getcwd(char *buf
, size_t buflen
, int *error
)
2543 struct proc
*p
= curproc
;
2545 int i
, slash_prefixed
;
2546 struct filedesc
*fdp
;
2547 struct nchandle nch
;
2556 nch
= fdp
->fd_ncdir
;
2557 while (nch
.ncp
&& (nch
.ncp
!= fdp
->fd_nrdir
.ncp
||
2558 nch
.mount
!= fdp
->fd_nrdir
.mount
)
2561 * While traversing upwards if we encounter the root
2562 * of the current mount we have to skip to the mount point
2563 * in the underlying filesystem.
2565 if (nch
.ncp
== nch
.mount
->mnt_ncmountpt
.ncp
) {
2566 nch
= nch
.mount
->mnt_ncmounton
;
2571 * Prepend the path segment
2573 for (i
= nch
.ncp
->nc_nlen
- 1; i
>= 0; i
--) {
2579 *--bp
= nch
.ncp
->nc_name
[i
];
2590 * Go up a directory. This isn't a mount point so we don't
2591 * have to check again.
2593 nch
.ncp
= nch
.ncp
->nc_parent
;
2595 if (nch
.ncp
== NULL
) {
2600 if (!slash_prefixed
) {
2614 * Thus begins the fullpath magic.
2618 #define STATNODE(name) \
2619 static u_int name; \
2620 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")
2622 static int disablefullpath
;
2623 SYSCTL_INT(_debug
, OID_AUTO
, disablefullpath
, CTLFLAG_RW
,
2624 &disablefullpath
, 0, "");
2626 STATNODE(numfullpathcalls
);
2627 STATNODE(numfullpathfail1
);
2628 STATNODE(numfullpathfail2
);
2629 STATNODE(numfullpathfail3
);
2630 STATNODE(numfullpathfail4
);
2631 STATNODE(numfullpathfound
);
2634 cache_fullpath(struct proc
*p
, struct nchandle
*nchp
, char **retbuf
, char **freebuf
)
2636 struct nchandle fd_nrdir
;
2637 struct nchandle nch
;
2638 struct namecache
*ncp
;
2646 atomic_add_int(&numfullpathcalls
, -1);
2647 lwkt_gettoken(&nlock
, &vfs_token
);
2652 buf
= kmalloc(MAXPATHLEN
, M_TEMP
, M_WAITOK
);
2653 bp
= buf
+ MAXPATHLEN
- 1;
2656 fd_nrdir
= p
->p_fd
->fd_nrdir
;
2660 cache_copy(nchp
, &nch
);
2664 while (ncp
&& (ncp
!= fd_nrdir
.ncp
|| mp
!= fd_nrdir
.mount
)) {
2666 * While traversing upwards if we encounter the root
2667 * of the current mount we have to skip to the mount point.
2669 if (ncp
== mp
->mnt_ncmountpt
.ncp
) {
2671 cache_copy(&mp
->mnt_ncmounton
, &nch
);
2678 * Prepend the path segment
2680 for (i
= nch
.ncp
->nc_nlen
- 1; i
>= 0; i
--) {
2687 *--bp
= nch
.ncp
->nc_name
[i
];
2699 * Go up a directory. This isn't a mount point so we don't
2700 * have to check again.
2702 * We need the ncp's spinlock to safely access nc_parent.
2704 if ((nch
.ncp
= ncp
->nc_parent
) != NULL
)
2705 _cache_hold(nch
.ncp
);
2709 if (nch
.ncp
== NULL
) {
2716 if (!slash_prefixed
) {
2731 lwkt_reltoken(&nlock
);
2736 vn_fullpath(struct proc
*p
, struct vnode
*vn
, char **retbuf
, char **freebuf
)
2738 struct namecache
*ncp
;
2739 struct nchandle nch
;
2742 atomic_add_int(&numfullpathcalls
, 1);
2743 if (disablefullpath
)
2749 /* vn is NULL, client wants us to use p->p_textvp */
2751 if ((vn
= p
->p_textvp
) == NULL
)
2754 spin_lock_wr(&vn
->v_spinlock
);
2755 TAILQ_FOREACH(ncp
, &vn
->v_namecache
, nc_vnode
) {
2760 spin_unlock_wr(&vn
->v_spinlock
);
2764 spin_unlock_wr(&vn
->v_spinlock
);
2766 atomic_add_int(&numfullpathcalls
, -1);
2768 nch
.mount
= vn
->v_mount
;
2769 error
= cache_fullpath(p
, &nch
, retbuf
, freebuf
);