/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/spinlock.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <sys/sysref2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>
#define MAX_RECURSION_DEPTH	64
/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).  Each hash chain has its own spin lock.
 *
 * Negative entries may exist and correspond to resolved namecache
 * structures where nc_vp is NULL.  In a negative entry, NCF_WHITEOUT
 * will be set if the entry corresponds to a whited-out directory entry
 * (versus simply not finding the entry at all).  ncneglist is locked
 * with a global spinlock (ncspin).
 *
 * MPSAFE RULES:
 *
 * (1) A ncp must be referenced before it can be locked.
 *
 * (2) A ncp must be locked in order to modify it.
 *
 * (3) ncp locks are always ordered child -> parent.  That may seem
 *     backwards but forward scans use the hash table and thus can hold
 *     the parent unlocked when traversing downward.
 *
 *     This allows insert/rename/delete/dot-dot and other operations
 *     to use ncp->nc_parent links.
 *
 *     This also prevents a locked up e.g. NFS node from creating a
 *     chain reaction all the way back to the root vnode / namecache.
 *
 * (4) parent linkages require both the parent and child to be locked.
 */
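/*
 * Illustrative sketch (not part of the original source): honoring rules
 * (3) and (4) when a child and its parent must both be locked.  The child
 * is locked first, then the parent; locking in the other order risks
 * deadlock against a concurrent child->parent locker.  The helper name
 * below is hypothetical.
 *
 *	static void
 *	example_lock_pair(struct namecache *child)
 *	{
 *		struct namecache *par;
 *
 *		_cache_hold(child);
 *		_cache_lock(child);		// child first (rule 3)
 *		if ((par = child->nc_parent) != NULL) {
 *			_cache_hold(par);
 *			_cache_lock(par);	// then parent (rule 4)
 *		}
 *		// ... safe to modify linkages here ...
 *	}
 */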
/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)		(&nchashtbl[(hash) & nchash])
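/*
 * Illustrative sketch, mirroring cache_nlookup() below: the hash key
 * combines the component name and the parent ncp pointer, and NCHHASH()
 * masks the result down to a chain headed by its own spinlock:
 *
 *	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
 *	nchpp = NCHHASH(hash);		// per-chain lock in nchpp->spin
 */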
#define MINNEG			1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

LIST_HEAD(nchash_list, namecache);

struct nchash_head {
	struct nchash_list list;
	struct spinlock	spin;
};

static struct nchash_head	*nchashtbl;
static struct namecache_list	ncneglist;
static struct spinlock		ncspin;
/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code run as if the parent vnode did not
 *	have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");
static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static int	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static int	numneg;			/* number of negative entries allocated */
SYSCTL_INT(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static int	numdefered;		/* number of deferred-zap entries */
SYSCTL_INT(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0, "");

static int	numcache;		/* number of cache entries allocated */
SYSCTL_INT(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");
static int cache_mpsafe = 1;
SYSCTL_INT(_vfs, OID_AUTO, cache_mpsafe, CTLFLAG_RW, &cache_mpsafe, 0, "");
static int cache_resolve_mp(struct mount *mp);
static struct vnode *cache_dvpref(struct namecache *ncp);
static void _cache_lock(struct namecache *ncp);
static void _cache_setunresolved(struct namecache *ncp);
static void _cache_cleanneg(int count);
static void _cache_cleandefered(void);
/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
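/*
 * For reference (illustrative expansion, not original text): a use such
 * as STATNODE(CTLFLAG_RD, numneg, &numneg) expands to
 *
 *	SYSCTL_ULONG(_vfs_cache, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");
 *
 * i.e. each counter below becomes a read-only sysctl under vfs.cache.
 */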
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distributions.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i;
	int error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
			sizeof(struct nchstats))))
			break;
	}
	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
	0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");

static struct namecache *cache_zap(struct namecache *ncp, int nonblock);
/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * The primary lock field is nc_exlocks.  nc_locktd is set after the
 * fact (when locking) or cleared prior to unlocking.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 *	     or recycled, but it does NOT help you if the vnode had already
 *	     initiated a recyclement.  If this is important, use cache_get()
 *	     rather than cache_lock() (and deal with the differences in the
 *	     way the refs counter is handled).  Or, alternatively, make an
 *	     unconditional call to cache_validate() or cache_resolve()
 *	     after cache_lock() returns.
 */
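/*
 * Illustrative sketch (assumption, following the WARNING above): a caller
 * that uses cache_lock() directly and needs a usable vnode can re-resolve
 * after acquiring the lock rather than switching to cache_get():
 *
 *	cache_hold(&nch);
 *	cache_lock(&nch);
 *	if (cache_resolve(&nch, cred) != 0) {
 *		// entry could not be resolved; handle the error
 *	}
 *	// ... use nch ...
 *	cache_unlock(&nch);
 *	cache_drop(&nch);
 */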
static
void
_cache_lock(struct namecache *ncp)
{
	thread_t td;
	u_int count;
	int didwarn;
	int error;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;
	td = curthread;

	for (;;) {
		count = ncp->nc_exlocks;

		if (count == 0) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 * lock on the ncp (which we do).
				 */
				ncp->nc_locktd = td;
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);	/* MPSAFE */
				break;
			}
			continue;
		}
		if (ncp->nc_locktd == td) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
					      count + 1)) {
				break;
			}
			continue;
		}
		tsleep_interlock(ncp, 0);
		if (atomic_cmpset_int(&ncp->nc_exlocks, count,
				      count | NC_EXLOCK_REQ) == 0) {
			continue;
		}
		error = tsleep(ncp, PINTERLOCKED, "clock", nclockwarn);
		if (error == EWOULDBLOCK) {
			if (didwarn == 0) {
				didwarn = ticks;
				kprintf("[diagnostic] cache_lock: blocked "
					"on %p", ncp);
				kprintf(" \"%*.*s\"\n",
					ncp->nc_nlen, ncp->nc_nlen,
					ncp->nc_name);
			}
		}
	}
	if (didwarn) {
		kprintf("[diagnostic] cache_lock: unblocked %*.*s after "
			"%d secs\n",
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
			(int)(ticks - didwarn) / hz);
	}
}
/*
 * NOTE: nc_refs may be zero if the ncp is interlocked by circumstance,
 *	 such as the case where one of its children is locked.
 */
static
int
_cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td;
	u_int count;

	td = curthread;

	for (;;) {
		count = ncp->nc_exlocks;

		if (count == 0) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 * lock on the ncp (which we do).
				 */
				ncp->nc_locktd = td;
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);	/* MPSAFE */
				return(0);
			}
			continue;
		}
		if (ncp->nc_locktd == td) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
					      count + 1)) {
				return(0);
			}
			continue;
		}
		return(EWOULDBLOCK);
	}
}
/*
 * NOTE: nc_refs can be 0 (degenerate case during _cache_drop).
 *
 *	 nc_locktd must be NULLed out prior to nc_exlocks getting cleared.
 */
static
void
_cache_unlock(struct namecache *ncp)
{
	thread_t td __debugvar = curthread;
	u_int count;

	KKASSERT(ncp->nc_refs >= 0);
	KKASSERT(ncp->nc_exlocks > 0);
	KKASSERT(ncp->nc_locktd == td);

	count = ncp->nc_exlocks;
	if ((count & ~NC_EXLOCK_REQ) == 1) {
		ncp->nc_locktd = NULL;
		if (ncp->nc_vp)
			vdrop(ncp->nc_vp);
	}
	for (;;) {
		if ((count & ~NC_EXLOCK_REQ) == 1) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count, 0)) {
				if (count & NC_EXLOCK_REQ)
					wakeup(ncp);
				break;
			}
		} else {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
					      count - 1)) {
				break;
			}
		}
		count = ncp->nc_exlocks;
	}
}
/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't use one ourselves.
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
	atomic_add_int(&ncp->nc_refs, 1);
	return(ncp);
}
/*
 * Drop a cache entry, taking care to deal with races.
 *
 * For potential 1->0 transitions we must hold the ncp lock to safely
 * test its flags.  An unresolved entry with no children must be zapped
 * to avoid leaks.
 *
 * The call to cache_zap() itself will handle all remaining races and
 * will decrement the ncp's refs regardless.  If we are resolved or
 * have children nc_refs can safely be dropped to 0 without having to
 * zap the entry.
 *
 * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion.
 *
 * NOTE: cache_zap() may return a non-NULL referenced parent which must
 *	 be dropped in a loop.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
	int refs;

	while (ncp) {
		KKASSERT(ncp->nc_refs > 0);
		refs = ncp->nc_refs;

		if (refs == 1) {
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
				if ((ncp->nc_flag & NCF_UNRESOLVED) &&
				    TAILQ_EMPTY(&ncp->nc_list)) {
					ncp = cache_zap(ncp, 1);
					continue;
				}
				if (atomic_cmpset_int(&ncp->nc_refs, 1, 0)) {
					_cache_unlock(ncp);
					break;
				}
				_cache_unlock(ncp);
			}
		} else {
			if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1))
				break;
		}
		cpu_pause();
	}
}
/*
 * Link a new namecache entry to its parent and to the hash table.  Be
 * careful to avoid races if vhold() blocks in the future.
 *
 * Both ncp and par must be referenced and locked.
 *
 * NOTE: The hash table spinlock is likely held during this call, we
 *	 can't do anything fancy.
 */
static void
_cache_link_parent(struct namecache *ncp, struct namecache *par,
		   struct nchash_head *nchpp)
{
	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	ncp->nc_head = nchpp;

	/*
	 * Set inheritance flags.  Note that the parent flags may be
	 * stale due to getattr potentially not having been run yet
	 * (it gets run during nlookup()'s).
	 */
	ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
	if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
		ncp->nc_flag |= NCF_SF_PNOCACHE;
	if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
		ncp->nc_flag |= NCF_UF_PCACHE;

	LIST_INSERT_HEAD(&nchpp->list, ncp, nc_hash);

	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}
/*
 * Remove the parent and hash associations from a namecache structure.
 * If this is the last child of the parent the cache_drop(par) will
 * attempt to recursively zap the parent.
 *
 * ncp must be locked.  This routine will acquire a temporary lock on
 * the parent as well as the appropriate hash chain.
 */
static void
_cache_unlink_parent(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dropvp;

	if ((par = ncp->nc_parent) != NULL) {
		KKASSERT(ncp->nc_parent == par);
		_cache_hold(par);
		_cache_lock(par);
		spin_lock_wr(&ncp->nc_head->spin);
		LIST_REMOVE(ncp, nc_hash);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		dropvp = NULL;
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			dropvp = par->nc_vp;
		spin_unlock_wr(&ncp->nc_head->spin);
		ncp->nc_parent = NULL;
		ncp->nc_head = NULL;
		_cache_unlock(par);
		_cache_drop(par);

		/*
		 * We can only safely vdrop with no spinlocks held.
		 */
		if (dropvp)
			vdrop(dropvp);
	}
}
/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;

	TAILQ_INIT(&ncp->nc_list);
	_cache_lock(ncp);
	return(ncp);
}
/*
 * Can only be called for the case where the ncp has never been
 * associated with anything (so no spinlocks are needed).
 */
static void
_cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);
}
void
cache_zero(struct nchandle *nch)
{
	nch->ncp = NULL;
	nch->mount = NULL;
}
/*
 * Ref and deref a namecache structure.
 *
 * The caller must specify a stable ncp pointer, typically meaning the
 * ncp is already referenced but this can also occur indirectly through
 * e.g. holding a lock on a direct child.
 *
 * WARNING: Caller may hold an unrelated read spinlock, which means we can't
 *	    use read spinlocks here.
 */
struct nchandle *
cache_hold(struct nchandle *nch)
{
	_cache_hold(nch->ncp);
	atomic_add_int(&nch->mount->mnt_refs, 1);
	return(nch);
}

/*
 * Create a copy of a namecache handle for an already-referenced
 * entry.
 */
void
cache_copy(struct nchandle *nch, struct nchandle *target)
{
	*target = *nch;
	_cache_hold(target->ncp);
	atomic_add_int(&nch->mount->mnt_refs, 1);
}

void
cache_changemount(struct nchandle *nch, struct mount *mp)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	nch->mount = mp;
	atomic_add_int(&nch->mount->mnt_refs, 1);
}

void
cache_drop(struct nchandle *nch)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	_cache_drop(nch->ncp);
	nch->ncp = NULL;
}

void
cache_lock(struct nchandle *nch)
{
	_cache_lock(nch->ncp);
}
/*
 * Relock nch1 given an unlocked nch1 and a locked nch2.  The caller
 * is responsible for checking both for validity on return as they
 * may have become invalid.
 *
 * We have to deal with potential deadlocks here, just ping pong
 * the lock until we get it (we will always block somewhere when
 * looping so this is not cpu-intensive).
 *
 * which = 0	nch1 not locked, nch2 is locked
 * which = 1	nch1 is locked, nch2 is not locked
 */
void
cache_relock(struct nchandle *nch1, struct ucred *cred1,
	     struct nchandle *nch2, struct ucred *cred2)
{
	int which;

	which = 0;

	for (;;) {
		if (which == 0) {
			if (cache_lock_nonblock(nch1) == 0) {
				cache_resolve(nch1, cred1);
				break;
			}
			cache_unlock(nch2);
			cache_lock(nch1);
			cache_resolve(nch1, cred1);
			which = 1;
		} else {
			if (cache_lock_nonblock(nch2) == 0) {
				cache_resolve(nch2, cred2);
				break;
			}
			cache_unlock(nch1);
			cache_lock(nch2);
			cache_resolve(nch2, cred2);
			which = 0;
		}
	}
}
int
cache_lock_nonblock(struct nchandle *nch)
{
	return(_cache_lock_nonblock(nch->ncp));
}

void
cache_unlock(struct nchandle *nch)
{
	_cache_unlock(nch->ncp);
}
/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.
 *
 * We want cache_get() to return a definitively usable vnode or a
 * definitively unresolved ncp.
 */
static
struct namecache *
_cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	_cache_lock(ncp);
	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
		_cache_setunresolved(ncp);
	return(ncp);
}

/*
 * This is a special form of _cache_lock() which only succeeds if
 * it can get a pristine, non-recursive lock.  The caller must have
 * already ref'd the ncp.
 *
 * On success the ncp will be locked, on failure it will not.  The
 * ref count does not change either way.
 *
 * We want _cache_lock_special() (on success) to return a definitively
 * usable vnode or a definitively unresolved ncp.
 */
static int
_cache_lock_special(struct namecache *ncp)
{
	if (_cache_lock_nonblock(ncp) == 0) {
		if ((ncp->nc_exlocks & ~NC_EXLOCK_REQ) == 1) {
			if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
				_cache_setunresolved(ncp);
			return(0);
		}
		_cache_unlock(ncp);
	}
	return(EWOULDBLOCK);
}

/*
 * NOTE: The same nchandle can be passed for both arguments.
 */
void
cache_get(struct nchandle *nch, struct nchandle *target)
{
	KKASSERT(nch->ncp->nc_refs > 0);
	target->mount = nch->mount;
	target->ncp = _cache_get(nch->ncp);
	atomic_add_int(&target->mount->mnt_refs, 1);
}
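/*
 * Illustrative usage (assumption, not from the original file): a lookup
 * consumer typically brackets its work with cache_get()/cache_put(),
 * which yields either a usable vnode or an unresolved entry:
 *
 *	struct nchandle tnch;
 *
 *	cache_get(&nch, &tnch);		// ref+lock, revalidates the vp
 *	// ... operate on tnch.ncp ...
 *	cache_put(&tnch);		// unlock+deref
 */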
static __inline
void
_cache_put(struct namecache *ncp)
{
	_cache_unlock(ncp);
	_cache_drop(ncp);
}

void
cache_put(struct nchandle *nch)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	_cache_put(nch->ncp);
	nch->ncp = NULL;
}
/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
static
void
_cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);

	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		spin_lock_wr(&vp->v_spinlock);
		ncp->nc_vp = vp;
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		spin_unlock_wr(&vp->v_spinlock);
		if (ncp->nc_exlocks)
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		atomic_add_int(&numcache, 1);
		ncp->nc_error = 0;
	} else {
		/*
		 * When creating a negative cache hit we set the
		 * namecache_gen.  A later resolve will clean out the
		 * negative cache hit if the mount point's namecache_gen
		 * has changed.  Used by devfs, could also be used by
		 * other remote FSs.
		 */
		ncp->nc_vp = NULL;
		spin_lock_wr(&ncspin);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		spin_unlock_wr(&ncspin);
		ncp->nc_error = ENOENT;
		if (mp)
			ncp->nc_namecache_gen = mp->mnt_namecache_gen;
	}
	ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);
}

void
cache_setvp(struct nchandle *nch, struct vnode *vp)
{
	_cache_setvp(nch->mount, nch->ncp, vp);
}
void
cache_settimeout(struct nchandle *nch, int nticks)
{
	struct namecache *ncp = nch->ncp;

	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}
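/*
 * Illustrative usage (assumption): a filesystem resolver that wants an
 * entry re-verified after roughly 30 seconds can stamp it at resolve
 * time; _cache_auto_unresolve() below then zaps it once it expires:
 *
 *	cache_setvp(nch, vp);
 *	cache_settimeout(nch, 30 * hz);	// nc_timeout is in ticks
 */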
/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 */
static
void
_cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		if ((vp = ncp->nc_vp) != NULL) {
			atomic_add_int(&numcache, -1);
			spin_lock_wr(&vp->v_spinlock);
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
			spin_unlock_wr(&vp->v_spinlock);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			spin_lock_wr(&ncspin);
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
			spin_unlock_wr(&ncspin);
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
	}
}
/*
 * The cache_nresolve() code calls this function to automatically
 * set a resolved cache element to unresolved if it has timed out
 * or if it is a negative cache hit and the mount point namecache_gen
 * has changed.
 */
static __inline void
_cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
{
	/*
	 * Already in an unresolved state, nothing to do.
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		return;

	/*
	 * Try to zap entries that have timed out.  We have
	 * to be careful here because locked leafs may depend
	 * on the vnode remaining intact in a parent, so only
	 * do this under very specific conditions.
	 */
	if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
	    TAILQ_EMPTY(&ncp->nc_list)) {
		_cache_setunresolved(ncp);
		return;
	}

	/*
	 * If a resolved negative cache hit is invalid due to
	 * the mount's namecache generation being bumped, zap it.
	 */
	if (ncp->nc_vp == NULL &&
	    ncp->nc_namecache_gen != mp->mnt_namecache_gen) {
		_cache_setunresolved(ncp);
	}
}
void
cache_setunresolved(struct nchandle *nch)
{
	_cache_setunresolved(nch->ncp);
}
/*
 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
 * looking for matches.  This flag tells the lookup code when it must
 * check for a mount linkage and also prevents the directories in question
 * from being deleted or renamed.
 */
static int
cache_clrmountpt_callback(struct mount *mp, void *data)
{
	struct nchandle *nch = data;

	if (mp->mnt_ncmounton.ncp == nch->ncp)
		return(1);
	if (mp->mnt_ncmountpt.ncp == nch->ncp)
		return(1);
	return(0);
}

void
cache_clrmountpt(struct nchandle *nch)
{
	int count;

	count = mountlist_scan(cache_clrmountpt_callback, nch,
			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	if (count == 0)
		nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
}
/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be referenced and locked.  The routine may unlock
 * and relock ncp several times, and will recheck the children and loop
 * to catch races.  When done the passed ncp will be returned with the
 * reference and lock intact.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact, but
 * the nodes will be marked as having been destroyed and will be set
 * to an unresolved state.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* not have been re-resolved.
 */
/*
 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
 * have to avoid blowing out the kernel stack.  We do this by saving the
 * deep namecache node and aborting the recursion, then re-recursing at that
 * node using a depth-first algorithm in order to allow multiple deep
 * recursions to chain through each other, then we restart the invalidation
 * from scratch.
 */
struct cinvtrack {
	struct namecache *resume_ncp;
	int depth;
};

static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);

static int
_cache_inval(struct namecache *ncp, int flags)
{
	struct cinvtrack track;
	struct namecache *ncp2;
	int r;

	track.depth = 0;
	track.resume_ncp = NULL;

	for (;;) {
		r = _cache_inval_internal(ncp, flags, &track);
		if (track.resume_ncp == NULL)
			break;
		kprintf("Warning: deep namecache recursion at %s\n",
			ncp->nc_name);
		_cache_unlock(ncp);
		while ((ncp2 = track.resume_ncp) != NULL) {
			track.resume_ncp = NULL;
			_cache_lock(ncp2);
			_cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
					      &track);
			_cache_put(ncp2);
		}
		_cache_lock(ncp);
	}
	return(r);
}

int
cache_inval(struct nchandle *nch, int flags)
{
	return(_cache_inval(nch->ncp, flags));
}
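/*
 * The resume pattern in miniature (illustrative sketch of what
 * _cache_inval_internal() below does): a walk that would exceed
 * MAX_RECURSION_DEPTH parks the deep node in track->resume_ncp instead
 * of descending further, and _cache_inval() above re-runs the walk from
 * that node with a fresh kernel stack:
 *
 *	if (++track->depth > MAX_RECURSION_DEPTH) {
 *		track->resume_ncp = ncp;	// park; held for the caller
 *		_cache_hold(ncp);
 *		// ... unwind; caller restarts from ncp ...
 *	}
 */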
/*
 * Helper for _cache_inval().  The passed ncp is refd and locked and
 * remains that way on return, but may be unlocked/relocked multiple
 * times by the routine.
 */
static int
_cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
{
	struct namecache *kid;
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(ncp->nc_exlocks);

	_cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;
	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		_cache_hold(kid);
		if (++track->depth > MAX_RECURSION_DEPTH) {
			track->resume_ncp = ncp;
			_cache_hold(ncp);
			++rcnt;
		}
		_cache_unlock(ncp);
		while (kid) {
			if (track->resume_ncp) {
				_cache_drop(kid);
				break;
			}
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				_cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				_cache_lock(kid);
				rcnt += _cache_inval_internal(kid,
						flags & ~CINV_DESTROY, track);
				_cache_unlock(kid);
			}
			_cache_drop(kid);
			kid = nextkid;
		}
		--track->depth;
		_cache_lock(ncp);
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}
/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
 *	 be ripped out of the topology while held, the vnode's v_namecache
 *	 list has no such restriction.  NCP's can be ripped out of the list
 *	 at virtually any time if not locked, even if held.
 *
 *	 In addition, the v_namecache list itself must be locked via
 *	 the vnode's spinlock.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	spin_lock_wr(&vp->v_spinlock);
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held and vp spin-locked */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock_wr(&vp->v_spinlock);
		_cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto restart;
		}
		_cache_inval(ncp, flags);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock_wr(&vp->v_spinlock);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock_wr(&vp->v_spinlock);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto restart;
		}
	}
	spin_unlock_wr(&vp->v_spinlock);
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}
/*
 * This routine is used instead of the normal cache_inval_vp() when we
 * are trying to recycle otherwise good vnodes.
 *
 * Return 0 on success, non-zero if not all namecache records could be
 * disassociated from the vnode (for various reasons).
 */
int
cache_inval_vp_nonblock(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *next;

	spin_lock_wr(&vp->v_spinlock);
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock_wr(&vp->v_spinlock);
		if (_cache_lock_nonblock(ncp)) {
			_cache_drop(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		_cache_inval(ncp, 0);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock_wr(&vp->v_spinlock);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock_wr(&vp->v_spinlock);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto done;
		}
	}
	spin_unlock_wr(&vp->v_spinlock);
done:
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}
/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  The target ncp is destroyed (as a normal rename-over
 * would destroy the target file or directory).
 *
 * Because there may be references to the source ncp we cannot copy its
 * contents to the target.  Instead the source ncp is relinked as the target
 * and the target ncp is removed from the namecache topology.
 */
void
cache_rename(struct nchandle *fnch, struct nchandle *tnch)
{
	struct namecache *fncp = fnch->ncp;
	struct namecache *tncp = tnch->ncp;
	struct namecache *tncp_par;
	struct nchash_head *nchpp;
	u_int32_t hash;
	char *oname;

	/*
	 * Rename fncp (unlink)
	 */
	_cache_unlink_parent(fncp);
	oname = fncp->nc_name;
	fncp->nc_name = tncp->nc_name;
	fncp->nc_nlen = tncp->nc_nlen;
	tncp_par = tncp->nc_parent;
	_cache_hold(tncp_par);
	_cache_lock(tncp_par);

	/*
	 * Rename fncp (relink)
	 */
	hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT);
	hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash);
	nchpp = NCHHASH(hash);

	spin_lock_wr(&nchpp->spin);
	_cache_link_parent(fncp, tncp_par, nchpp);
	spin_unlock_wr(&nchpp->spin);

	_cache_put(tncp_par);

	/*
	 * Get rid of the overwritten tncp (unlink)
	 */
	_cache_setunresolved(tncp);
	_cache_unlink_parent(tncp);
	tncp->nc_name = NULL;
	tncp->nc_nlen = 0;

	if (oname)
		kfree(oname, M_VFSCACHE);
}
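/*
 * Illustrative note (assumption, not original text): a VFS rename path
 * typically calls this once the backing store has been updated, with both
 * handles referenced and locked (e.g. via cache_relock() above):
 *
 *	cache_rename(&fnch, &tnch);	// fncp takes over tncp's name/parent
 *	cache_put(&tnch);		// tncp is now detached from the topology
 */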
/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary.  The passed ncp must be referenced and locked.
 *
 * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The vget() can race a reclaim.  If this occurs we re-resolve the
 * namecache entry.
 *
 * There are numerous places in the kernel where vget() is called on a
 * vnode while one or more of its namecache entries is locked.  Releasing
 * a vnode never deadlocks against locked namecache entries (the vnode
 * will not get recycled while referenced ncp's exist).  This means we
 * can safely acquire the vnode.  In fact, we MUST NOT release the ncp
 * lock when acquiring the vp lock or we might cause a deadlock.
 */
int
cache_vget(struct nchandle *nch, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
	KKASSERT(ncp->nc_locktd == curthread);
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, cred);
	else
		error = 0;

	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, lk_type);
		if (error) {
			/*
			 * VRECLAIM race
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vget on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_setunresolved(ncp);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
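/*
 * Illustrative usage (assumption): converting a resolved lookup into a
 * locked vnode, e.g. on an open path.  nch must be referenced and locked
 * at this point, per the comment above:
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = cache_vget(&nch, cred, LK_EXCLUSIVE, &vp);
 *	if (error == 0) {
 *		// use the locked, referenced vp
 *		vput(vp);
 *	}
 */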
int
cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
	KKASSERT(ncp->nc_locktd == curthread);
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, cred);
	else
		error = 0;

	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, LK_SHARED);
		if (error) {
			/*
			 * VRECLAIM race
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vref on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_setunresolved(ncp);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
			/* caller does not want a lock */
			vn_unlock(vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
/*
 * Return a referenced vnode representing the parent directory of
 * ncp.
 *
 * Because the caller has locked the ncp it should not be possible for
 * the parent ncp to go away.  However, the parent can unresolve its
 * dvp at any time so we must be able to acquire a lock on the parent
 * to safely access nc_vp.
 *
 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
 * so use vhold()/vdrop() while holding the lock to prevent dvp from
 * getting destroyed.
 *
 * MPSAFE - Note vhold() is allowed when dvp has 0 refs if we hold a
 *	    lock on the ncp in question.
 */
static struct vnode *
cache_dvpref(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dvp;

	dvp = NULL;
	if ((par = ncp->nc_parent) != NULL) {
		_cache_hold(par);
		_cache_lock(par);
		if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
			if ((dvp = par->nc_vp) != NULL)
				vhold(dvp);
		}
		_cache_unlock(par);
		if (dvp) {
			if (vget(dvp, LK_SHARED) == 0) {
				vn_unlock(dvp);
				vdrop(dvp);
				/* return refd, unlocked dvp */
			} else {
				vdrop(dvp);
				dvp = NULL;
			}
		}
		_cache_drop(par);
	}
	return(dvp);
}
/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 *
 * To avoid underflowing the kernel stack each recursive call increments
 * the makeit variable.
 */
static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
				  struct vnode *dvp, char *fakename);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
			     struct vnode **saved_dvp);
int
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
	      struct nchandle *nch)
{
	struct vnode *saved_dvp;
	struct vnode *pvp;
	char *fakename;
	int error;

	nch->ncp = NULL;
	nch->mount = dvp->v_mount;
	saved_dvp = NULL;
	fakename = NULL;

	/*
	 * Handle the makeit == 0 degenerate case
	 */
	if (makeit == 0) {
		spin_lock_wr(&dvp->v_spinlock);
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		if (nch->ncp)
			cache_hold(nch);
		spin_unlock_wr(&dvp->v_spinlock);
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while (makeit) {
		/*
		 * Break out if we successfully acquire a working ncp.
		 */
		spin_lock_wr(&dvp->v_spinlock);
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		if (nch->ncp) {
			cache_hold(nch);
			spin_unlock_wr(&dvp->v_spinlock);
			break;
		}
		spin_unlock_wr(&dvp->v_spinlock);

		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch->mount);
			_cache_put(nch->ncp);
			if (ncvp_debug) {
				kprintf("cache_fromdvp: resolve root of mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					kprintf(" failed\n");
				nch->ncp = NULL;
				break;
			}
			if (ncvp_debug)
				kprintf(" succeeded\n");
			continue;
		}

		/*
		 * If we are recursed too deeply resort to an O(n^2)
		 * algorithm to resolve the namecache topology.  The
		 * resolved pvp is left referenced in saved_dvp to
		 * prevent the tree from being destroyed while we loop.
		 */
		if (makeit > 20) {
			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
			if (error) {
				kprintf("lookupdotdot(longpath) failed %d "
					"dvp %p\n", error, dvp);
				nch->ncp = NULL;
				break;
			}
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
			break;
		}
		vn_unlock(pvp);

		/*
		 * Reuse makeit as a recursion depth counter.  On success
		 * nch will be fully referenced.
		 */
		cache_fromdvp(pvp, cred, makeit + 1, nch);
		vrele(pvp);
		if (nch->ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
		 */
		error = cache_inefficient_scan(nch, cred, dvp, fakename);
		if (error) {
			kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, nch->ncp->nc_name, dvp);
			cache_drop(nch);
			/* nch was NULLed out, reload mount */
			nch->mount = dvp->v_mount;
			break;
		}
		if (ncvp_debug) {
			kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, nch->ncp->nc_name);
		}
		cache_drop(nch);
		/* nch was NULLed out, reload mount */
		nch->mount = dvp->v_mount;
	}

	/*
	 * If nch->ncp is non-NULL it will have been held already.
	 */
	if (fakename)
		kfree(fakename, M_TEMP);
	if (saved_dvp)
		vrele(saved_dvp);
	if (nch->ncp)
		return (0);
	return (ENOENT);
}
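/*
 * Illustrative usage (assumption): the NFS server recovers a connected
 * namecache topology for a directory obtained straight from a file handle:
 *
 *	struct nchandle nch;
 *
 *	if (cache_fromdvp(dvp, cred, 1, &nch) == 0) {
 *		// nch is held and unlocked; topology now reaches dvp
 *		cache_drop(&nch);
 *	}
 */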
/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
		  struct vnode **saved_dvp)
{
	struct nchandle nch;
	struct vnode *pvp;
	char *fakename;
	int error;
	static time_t last_fromdvp_report;

	/*
	 * Loop getting the parent directory vnode until we get something we
	 * can resolve in the namecache.
	 */
	vref(dvp);
	nch.mount = dvp->v_mount;
	nch.ncp = NULL;
	fakename = NULL;

	for (;;) {
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			vrele(dvp);
			break;
		}
		vn_unlock(pvp);
		spin_lock_wr(&pvp->v_spinlock);
		if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
			_cache_hold(nch.ncp);
			spin_unlock_wr(&pvp->v_spinlock);
			vrele(pvp);
			break;
		}
		spin_unlock_wr(&pvp->v_spinlock);
		if (pvp->v_flag & VROOT) {
			nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch.mount);
			_cache_unlock(nch.ncp);
			vrele(pvp);
			if (error) {
				_cache_drop(nch.ncp);
				nch.ncp = NULL;
				vrele(dvp);
			}
			break;
		}
		vrele(dvp);
		dvp = pvp;
	}
	if (error == 0) {
		if (last_fromdvp_report != time_second) {
			last_fromdvp_report = time_second;
			kprintf("Warning: extremely inefficient path "
				"resolution on %s\n",
				nch.ncp->nc_name);
		}
		error = cache_inefficient_scan(&nch, cred, dvp, fakename);

		/*
		 * Hopefully dvp now has a namecache record associated with
		 * it.  Leave it referenced to prevent the kernel from
		 * recycling the vnode.  Otherwise extremely long directory
		 * paths could result in endless recycling.
		 */
		if (*saved_dvp)
			vrele(*saved_dvp);
		*saved_dvp = dvp;
		_cache_drop(nch.ncp);
	}
	if (fakename)
		kfree(fakename, M_TEMP);
	return (error);
}
/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) Make the NFS server backtrack and scan to recover
 * a connected namecache topology in order to then be able to issue new API
 * lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every subsystem
 * that calls into the namecache to deal with the re-merge case, especially
 * since we are using the namecache to placehold negative lookups and the
 * vnode might not be immediately assigned.  (2) is certainly far less
 * efficient than (1), but since we are only talking about directories here
 * (which are likely to remain cached), the case does not actually run all
 * that often and has the supreme advantage of not polluting the namecache
 * algorithms.
 *
 * If a fakename is supplied just construct a namecache entry using the
 * fake name.
 */
static int
cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
		       struct vnode *dvp, char *fakename)
{
	struct nlcomponent nlc;
	struct nchandle rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	cache_lock(nch);
	error = cache_vref(nch, cred, &pvp);
	cache_unlock(nch);
	if (error)
		return (error);
	if (ncvp_debug) {
		kprintf("inefficient_scan: directory iosize %ld "
			"vattr fileid = %lld\n",
			vat.va_blocksize,
			(long long)vat.va_fileid);
	}

	/*
	 * Use the supplied fakename if not NULL.  Fake names are typically
	 * not in the actual filesystem hierarchy.  This is used by HAMMER
	 * to glue @@timestamp recursions together.
	 */
	if (fakename) {
		nlc.nlc_nameptr = fakename;
		nlc.nlc_namelen = strlen(fakename);
		rncp = cache_nlookup(nch, &nlc);
		goto done;
	}

	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
	rncp.ncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
again:
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (ncvp_debug >= 2)
		kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
	if (error == 0) {
		den = (struct dirent *)rbuf;
		bytes = blksize - uio.uio_resid;

		while (bytes > 0) {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_ino == vat.va_fileid) {
				if (ncvp_debug) {
					kprintf("cache_inefficient_scan: "
						"MATCHED inode %lld path %s/%*.*s\n",
						(long long)vat.va_fileid,
						nch->ncp->nc_name,
						den->d_namlen, den->d_namlen,
						den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				rncp = cache_nlookup(nch, &nlc);
				KKASSERT(rncp.ncp != NULL);
				break;
			}
			bytes -= _DIRENT_DIRSIZ(den);
			den = _DIRENT_NEXT(den);
		}
		if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	kfree(rbuf, M_TEMP);
done:
	vrele(pvp);
	if (rncp.ncp) {
		if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
			_cache_setvp(rncp.mount, rncp.ncp, dvp);
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
					rncp.ncp->nc_vp);
			}
		}
		if (rncp.ncp->nc_vp == NULL)
			error = rncp.ncp->nc_error;
		/*
		 * Release rncp after a successful nlookup.  rncp was fully
		 * referenced.
		 */
		cache_put(&rncp);
	} else {
		kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, nch->ncp->nc_name);
		error = ENOENT;
	}
	return (error);
}
/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 *
 * If nonblock is non-zero and the parent ncp cannot be locked we give up.
 * This case can occur in the cache_drop() path.
 *
 * This function may return a held (but NOT locked) parent node which the
 * caller must drop.  We do this so _cache_drop() can loop, to avoid
 * blowing out the kernel stack.
 *
 * WARNING!  For MPSAFE operation this routine must acquire up to three
 *	     spin locks to be able to safely test nc_refs.  Lock order is
 *	     very important.
 *
 *	     hash spinlock if on hash list
 *	     parent spinlock if child of parent
 *	     (the ncp is unresolved so there is no vnode association)
 */
static struct namecache *
cache_zap(struct namecache *ncp, int nonblock)
{
	struct namecache *par;
	struct vnode *dropvp;
	int refs;

	/*
	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
	 */
	_cache_setunresolved(ncp);

	/*
	 * Try to scrap the entry and possibly tail-recurse on its parent.
	 * We only scrap unref'd (other than our ref) unresolved entries,
	 * we do not scrap 'live' entries.
	 *
	 * Note that once the spinlocks are acquired if nc_refs == 1 no
	 * other references are possible.  If it isn't, however, we have
	 * to decrement but also be sure to avoid a 1->0 transition.
	 */
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	KKASSERT(ncp->nc_refs > 0);

	/*
	 * Acquire locks.  Note that the parent can't go away while we hold
	 * a child locked.
	 */
	if ((par = ncp->nc_parent) != NULL) {
		if (nonblock) {
			for (;;) {
				if (_cache_lock_nonblock(par) == 0)
					break;
				refs = ncp->nc_refs;
				ncp->nc_flag |= NCF_DEFEREDZAP;
				++numdefered;	/* MP race ok */
				if (atomic_cmpset_int(&ncp->nc_refs,
						      refs, refs - 1)) {
					_cache_unlock(ncp);
					return(NULL);
				}
				cpu_pause();
			}
			_cache_hold(par);
		} else {
			_cache_hold(par);
			_cache_lock(par);
		}
		spin_lock_wr(&ncp->nc_head->spin);
	}

	/*
	 * If someone other than us has a ref or we have children
	 * we cannot zap the entry.  The 1->0 transition and any
	 * further list operation is protected by the spinlocks
	 * we have acquired but other transitions are not.
	 */
	for (;;) {
		refs = ncp->nc_refs;
		if (refs == 1 && TAILQ_EMPTY(&ncp->nc_list))
			break;
		if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) {
			if (par) {
				spin_unlock_wr(&ncp->nc_head->spin);
				_cache_put(par);
			}
			_cache_unlock(ncp);
			return(NULL);
		}
		cpu_pause();
	}

	/*
	 * We are the only ref and with the spinlocks held no further
	 * refs can be acquired by others.
	 *
	 * Remove us from the hash list and parent list.  We have to
	 * drop a ref on the parent's vp if the parent's list becomes
	 * empty.
	 */
	dropvp = NULL;
	if (par) {
		struct nchash_head *nchpp = ncp->nc_head;

		KKASSERT(nchpp != NULL);
		LIST_REMOVE(ncp, nc_hash);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			dropvp = par->nc_vp;
		ncp->nc_head = NULL;
		ncp->nc_parent = NULL;
		spin_unlock_wr(&nchpp->spin);
		_cache_unlock(par);
	} else {
		KKASSERT(ncp->nc_head == NULL);
	}

	/*
	 * ncp should not have picked up any refs.  Physically
	 * destroy the ncp.
	 */
	KKASSERT(ncp->nc_refs == 1);
	/* _cache_unlock(ncp) not required */
	ncp->nc_refs = -1;	/* safety */
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);

	/*
	 * Delayed drop (we had to release our spinlocks)
	 *
	 * The refed parent (if not NULL) must be dropped.  The
	 * caller is responsible for looping.
	 */
	if (dropvp)
		vdrop(dropvp);
	return(par);
}
/*
 * Clean up dangling negative cache and deferred-drop entries in the
 * namecache.
 */
static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;

static void
cache_hysteresis(void)
{
	/*
	 * Don't cache too many negative hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 */
	switch(cache_hysteresis_state) {
	case CHI_LOW:
		if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
			_cache_cleanneg(10);
			cache_hysteresis_state = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (numneg > MINNEG * 9 / 10 &&
		    numneg * ncnegfactor * 9 / 10 > numcache
		) {
			_cache_cleanneg(10);
		} else {
			cache_hysteresis_state = CHI_LOW;
		}
		break;
	}

	/*
	 * Clean out dangling deferred-zap ncps which could not
	 * be cleanly dropped if too many build up.  Note
	 * that numdefered is not an exact number as such ncps
	 * can be reused and the counter is not handled in a MP
	 * safe manner by design.
	 */
	if (numdefered * ncnegfactor > numcache) {
		_cache_cleandefered();
	}
}
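/*
 * Worked example (illustrative, not original text): with the default
 * ncnegfactor of 16, the CHI_LOW trigger "numneg * ncnegfactor > numcache"
 * fires once negative entries exceed 1/16th of the total cache population
 * (subject to the MINNEG floor), e.g.:
 *
 *	numcache = 64000, ncnegfactor = 16
 *	=> cleaning starts when numneg > 64000 / 16 = 4000
 *
 * The CHI_HIGH state then applies the same tests scaled by 9/10 so the
 * state machine does not flap when the counters sit at the boundary.
 */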
/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the namecache.  The passed par_nch must be referenced
 * and unlocked.  A referenced and locked nchandle with a non-NULL nch.ncp
 * is ALWAYS returned, even if the supplied component is illegal.
 *
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals (hence why the passed par_nch must be unlocked).  Locking
 * rules are to order for parent traversals, not for child traversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states: positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct nchandle
cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchash_head *nchpp;
	struct mount *mp;
	u_int32_t hash;
	globaldata_t gd;
	int par_locked;

	numcalls++;
	gd = mycpu;
	mp = par_nch->mount;
	par_locked = 0;

	/*
	 * This is a good time to call it, no ncp's are locked by
	 * the caller.
	 */
	cache_hysteresis();

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	new_ncp = NULL;
	nchpp = NCHHASH(hash);
restart:
	spin_lock_wr(&nchpp->spin);
	LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
		numchecks++;

		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			_cache_hold(ncp);
			spin_unlock_wr(&nchpp->spin);
			if (par_locked) {
				_cache_unlock(par_nch->ncp);
				par_locked = 0;
			}
			if (_cache_lock_special(ncp) == 0) {
				_cache_auto_unresolve(mp, ncp);
				if (new_ncp)
					_cache_free(new_ncp);
				goto found;
			}
			_cache_get(ncp);
			_cache_put(ncp);
			_cache_drop(ncp);
			goto restart;
		}
	}

	/*
	 * We failed to locate an entry, create a new entry and add it to
	 * the cache.  The parent ncp must also be locked so we
	 * can link into it.
	 *
	 * We have to relookup after possibly blocking in kmalloc or
	 * when locking par_nch.
	 *
	 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
	 *	 mount case, in which case nc_name will be NULL.
	 */
	if (new_ncp == NULL) {
		spin_unlock_wr(&nchpp->spin);
		new_ncp = cache_alloc(nlc->nlc_namelen);
		if (nlc->nlc_namelen) {
			bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
			      nlc->nlc_namelen);
			new_ncp->nc_name[nlc->nlc_namelen] = 0;
		}
		goto restart;
	}
	if (par_locked == 0) {
		spin_unlock_wr(&nchpp->spin);
		_cache_lock(par_nch->ncp);
		par_locked = 1;
		goto restart;
	}

	/*
	 * WARNING!  We still hold the spinlock.  We have to set the hash
	 *	     table entry atomically.
	 */
	ncp = new_ncp;
	_cache_link_parent(ncp, par_nch->ncp, nchpp);
	spin_unlock_wr(&nchpp->spin);
	_cache_unlock(par_nch->ncp);
	/* par_locked = 0 - not used */
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	nch.mount = mp;
	nch.ncp = ncp;
	atomic_add_int(&nch.mount->mnt_refs, 1);
	return(nch);
}
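/*
 * Illustrative usage (assumption): resolving one path component under a
 * referenced, unlocked parent handle:
 *
 *	struct nlcomponent nlc;
 *	struct nchandle nch;
 *
 *	nlc.nlc_nameptr = "example";		// hypothetical component
 *	nlc.nlc_namelen = 7;
 *	nch = cache_nlookup(&par_nch, &nlc);	// returned locked+referenced
 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		cache_resolve(&nch, cred);
 *	cache_put(&nch);
 */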
/*
 * This is a non-blocking version of cache_nlookup() used by
 * nfs_readdirplusrpc_uio().  It can fail for any reason and
 * will return nch.ncp == NULL in that case.
 */
2321 cache_nlookup_nonblock(struct nchandle
*par_nch
, struct nlcomponent
*nlc
)
2323 struct nchandle nch
;
2324 struct namecache
*ncp
;
2325 struct namecache
*new_ncp
;
2326 struct nchash_head
*nchpp
;
2334 mp
= par_nch
->mount
;
2338 * Try to locate an existing entry
2340 hash
= fnv_32_buf(nlc
->nlc_nameptr
, nlc
->nlc_namelen
, FNV1_32_INIT
);
2341 hash
= fnv_32_buf(&par_nch
->ncp
, sizeof(par_nch
->ncp
), hash
);
2343 nchpp
= NCHHASH(hash
);
2345 spin_lock_wr(&nchpp
->spin
);
2346 LIST_FOREACH(ncp
, &nchpp
->list
, nc_hash
) {
2350 * Break out if we find a matching entry. Note that
2351 * UNRESOLVED entries may match, but DESTROYED entries
2354 if (ncp
->nc_parent
== par_nch
->ncp
&&
2355 ncp
->nc_nlen
== nlc
->nlc_namelen
&&
2356 bcmp(ncp
->nc_name
, nlc
->nlc_nameptr
, ncp
->nc_nlen
) == 0 &&
2357 (ncp
->nc_flag
& NCF_DESTROYED
) == 0
2360 spin_unlock_wr(&nchpp
->spin
);
2362 _cache_unlock(par_nch
->ncp
);
2365 if (_cache_lock_special(ncp
) == 0) {
2366 _cache_auto_unresolve(mp
, ncp
);
2368 _cache_free(new_ncp
);
2379 * We failed to locate an entry, create a new entry and add it to
2380 * the cache. The parent ncp must also be locked so we
2383 * We have to relookup after possibly blocking in kmalloc or
2384 * when locking par_nch.
2386 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
2387 * mount case, in which case nc_name will be NULL.
2389 if (new_ncp
== NULL
) {
2390 spin_unlock_wr(&nchpp
->spin
);
2391 new_ncp
= cache_alloc(nlc
->nlc_namelen
);
2392 if (nlc
->nlc_namelen
) {
2393 bcopy(nlc
->nlc_nameptr
, new_ncp
->nc_name
,
2395 new_ncp
->nc_name
[nlc
->nlc_namelen
] = 0;
2399 if (par_locked
== 0) {
2400 spin_unlock_wr(&nchpp
->spin
);
2401 if (_cache_lock_nonblock(par_nch
->ncp
) == 0) {
2409 * WARNING! We still hold the spinlock. We have to set the hash
2410 * table entry atomically.
2413 _cache_link_parent(ncp
, par_nch
->ncp
, nchpp
);
2414 spin_unlock_wr(&nchpp
->spin
);
2415 _cache_unlock(par_nch
->ncp
);
2416 /* par_locked = 0 - not used */
2419 * stats and namecache size management
2421 if (ncp
->nc_flag
& NCF_UNRESOLVED
)
2422 ++gd
->gd_nchstats
->ncs_miss
;
2423 else if (ncp
->nc_vp
)
2424 ++gd
->gd_nchstats
->ncs_goodhits
;
2426 ++gd
->gd_nchstats
->ncs_neghits
;
2429 atomic_add_int(&nch
.mount
->mnt_refs
, 1);
2433 _cache_free(new_ncp
);
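/*
 * Illustrative sketch of a hypothetical caller of the non-blocking
 * variant (example_nlookup_nonblock is not part of this file).  A NULL
 * nch.ncp simply means "no entry this time"; the caller falls back or
 * retries later instead of sleeping.
 */
#if 0
static void
example_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;

	nch = cache_nlookup_nonblock(par_nch, nlc);
	if (nch.ncp == NULL)
		return;			/* would have blocked; skip */
	/* ... use nch.ncp ... */
	cache_put(&nch);
}
#endif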
/*
 * The namecache entry is marked as being used as a mount point.
 * Locate the mount if it is visible to the caller.
 */
struct findmount_info {
	struct mount *result;
	struct mount *nch_mount;
	struct namecache *nch_ncp;
};

static int
cache_findmount_callback(struct mount *mp, void *data)
{
	struct findmount_info *info = data;

	/*
	 * Check the mount's mounted-on point against the passed nch.
	 */
	if (mp->mnt_ncmounton.mount == info->nch_mount &&
	    mp->mnt_ncmounton.ncp == info->nch_ncp
	) {
		info->result = mp;
		return(-1);	/* stop the scan */
	}
	return(0);
}

struct mount *
cache_findmount(struct nchandle *nch)
{
	struct findmount_info info;

	info.result = NULL;
	info.nch_mount = nch->mount;
	info.nch_ncp = nch->ncp;
	mountlist_scan(cache_findmount_callback, &info,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	return(info.result);
}
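/*
 * Illustrative sketch of a hypothetical use of cache_findmount()
 * (example_cross_mount is not part of this file).  When a path lookup
 * lands on an ncp being used as a mount point, the traversal hops into
 * the mounted filesystem via the mount's mnt_ncmountpt.
 */
#if 0
static void
example_cross_mount(struct nchandle *nch)
{
	struct mount *mp;

	if ((mp = cache_findmount(nch)) != NULL) {
		/* re-anchor the lookup at the mounted fs root */
		/* ... continue with mp->mnt_ncmountpt ... */
	}
}
#endif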
/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * However, if a vnode was in the middle of a recyclement when the NCP
 * got locked, ncp->nc_vp might point to a vnode that is about to become
 * invalid.  cache_resolve() handles this case by unresolving the entry
 * and then re-resolving it.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * will be returned.
 */
int
cache_resolve(struct nchandle *nch, struct ucred *cred)
{
	struct namecache *par_tmp;
	struct namecache *par;
	struct namecache *ncp;
	struct nchandle nctmp;
	struct mount *mp;
	struct vnode *dvp;
	int error;

	ncp = nch->ncp;
	mp = nch->mount;
restart:
	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
			return (ncp->nc_error);
	}

	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp == mp->mnt_ncmountpt.ncp)
		return (cache_resolve_mp(mp));

	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		kprintf("EXDEV case 1 %p %*.*s\n", ncp,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		return(ncp->nc_error);
	}

	/*
	 * The vp's of the parent directories in the chain are held via vhold()
	 * due to the existence of the child, and should not disappear.
	 * However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace and
	 *	  destroying the namespace for entire directories quite often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur all that often, or if it does not have to go back too
	 * many nodes to resolve the ncp.
	 */
	while ((dvp = cache_dvpref(ncp)) == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(ENOENT);

		par = ncp->nc_parent;
		_cache_hold(par);
		_cache_lock(par);
		while ((par_tmp = par->nc_parent) != NULL &&
		       par_tmp->nc_vp == NULL) {
			_cache_hold(par_tmp);
			_cache_lock(par_tmp);
			_cache_put(par);
			par = par_tmp;
		}
		if (par->nc_parent == NULL) {
			kprintf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			return (EXDEV);
		}
		kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
			par->nc_nlen, par->nc_nlen, par->nc_name);
		/*
		 * The parent is not set in stone, ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
		 */
		_cache_get(par);	/* additional hold/lock */
		_cache_put(par);	/* from earlier hold/lock */
		if (par == nch->mount->mnt_ncmountpt.ncp) {
			cache_resolve_mp(nch->mount);
		} else if ((dvp = cache_dvpref(par)) == NULL) {
			kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			continue;
		} else {
			if (par->nc_flag & NCF_UNRESOLVED) {
				nctmp.mount = mp;
				nctmp.ncp = par;
				par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
			}
			vrele(dvp);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				kprintf("EXDEV case 3 %*.*s error %d\n",
					par->nc_nlen, par->nc_nlen, par->nc_name,
					par->nc_error);
				_cache_put(par);
				return(error);
			}
			kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
				par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		_cache_put(par);
		/* loop */
	}

	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 * ncp must already be resolved.
	 */
	if (dvp) {
		nctmp.mount = mp;
		nctmp.ncp = ncp;
		ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
		vrele(dvp);
	} else {
		ncp->nc_error = EPERM;
	}
	if (ncp->nc_error == EAGAIN) {
		kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		goto restart;
	}
	return(ncp->nc_error);
}
/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to NFS's mac-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics for this call is that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct mount *mp)
{
	struct namecache *ncp = mp->mnt_ncmountpt.ncp;
	struct vnode *vp;
	int error;

	KKASSERT(mp != NULL);

	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
	}

	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_unlock(ncp);
		while (vfs_busy(mp, 0))
			;
		error = VFS_ROOT(mp, &vp);
		_cache_lock(ncp);

		/*
		 * recheck the ncp state after relocking.
		 */
		if (ncp->nc_flag & NCF_UNRESOLVED) {
			ncp->nc_error = error;
			if (error == 0) {
				_cache_setvp(mp, ncp, vp);
				vput(vp);
			} else {
				kprintf("[diagnostic] cache_resolve_mp: failed"
					" to resolve mount %p err=%d ncp=%p\n",
					mp, error, ncp);
				_cache_setvp(mp, ncp, NULL);
			}
		} else if (error == 0) {
			vput(vp);
		}
		vfs_unbusy(mp);
	}
	return(ncp->nc_error);
}
/*
 * Clean out negative cache entries when too many have accumulated.
 */
static void
_cache_cleanneg(int count)
{
	struct namecache *ncp;

	/*
	 * Automode from the vnlru proc - clean out 10% of the negative cache
	 * entries.
	 */
	if (count == 0)
		count = numneg / 10 + 1;

	/*
	 * Attempt to clean out the specified number of negative cache
	 * entries.
	 */
	while (count) {
		spin_lock_wr(&ncspin);
		ncp = TAILQ_FIRST(&ncneglist);
		if (ncp == NULL) {
			spin_unlock_wr(&ncspin);
			break;
		}
		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		_cache_hold(ncp);
		spin_unlock_wr(&ncspin);
		if (_cache_lock_special(ncp) == 0) {
			ncp = cache_zap(ncp, 0);
			if (ncp)
				_cache_drop(ncp);
		} else {
			_cache_drop(ncp);
		}
		--count;
	}
}
/*
 * This is a kitchen sink function to clean out ncps which we
 * tried to zap from cache_drop() but failed because we were
 * unable to acquire the parent lock.
 *
 * Such entries can also be removed via cache_inval_vp(), such
 * as when unmounting.
 */
static void
_cache_cleandefered(void)
{
	struct nchash_head *nchpp;
	struct namecache *ncp;
	struct namecache dummy;
	int i;

	numdefered = 0;
	bzero(&dummy, sizeof(dummy));
	dummy.nc_flag = NCF_DESTROYED;

	for (i = 0; i <= nchash; ++i) {
		nchpp = &nchashtbl[i];
		spin_lock_wr(&nchpp->spin);
		LIST_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
		ncp = &dummy;
		while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) {
			if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
				continue;
			LIST_REMOVE(&dummy, nc_hash);
			LIST_INSERT_AFTER(ncp, &dummy, nc_hash);
			_cache_hold(ncp);
			spin_unlock_wr(&nchpp->spin);
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
				_cache_unlock(ncp);
			}
			_cache_drop(ncp);
			spin_lock_wr(&nchpp->spin);
			ncp = &dummy;
		}
		LIST_REMOVE(&dummy, nc_hash);
		spin_unlock_wr(&nchpp->spin);
	}
}
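/*
 * Illustrative sketch of the dummy-marker technique used by
 * _cache_cleandefered() above (example_marker_scan is not part of this
 * file).  A DESTROYED placeholder is threaded into the hash chain so
 * the spinlock can be dropped mid-scan; the scan later resumes from the
 * marker instead of restarting the chain.  The caller would bzero the
 * dummy and set NCF_DESTROYED so real lookups skip it.
 */
#if 0
static void
example_marker_scan(struct nchash_head *nchpp, struct namecache *dummy)
{
	struct namecache *ncp;

	spin_lock_wr(&nchpp->spin);
	LIST_INSERT_HEAD(&nchpp->list, dummy, nc_hash);
	ncp = dummy;
	while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) {
		/* advance the marker past the current entry */
		LIST_REMOVE(dummy, nc_hash);
		LIST_INSERT_AFTER(ncp, dummy, nc_hash);
		/* the spinlock may now be dropped and re-acquired */
		ncp = dummy;
	}
	LIST_REMOVE(dummy, nc_hash);
	spin_unlock_wr(&nchpp->spin);
}
#endif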
/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
	int i;
	globaldata_t gd;

	/* initialise per-cpu namecache effectiveness statistics. */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}
	TAILQ_INIT(&ncneglist);
	spin_init(&ncspin);
	nchashtbl = hashinit_ext(desiredvnodes*2, sizeof(struct nchash_head),
				 M_VFSCACHE, &nchash);
	for (i = 0; i <= (int)nchash; ++i) {
		LIST_INIT(&nchashtbl[i].list);
		spin_init(&nchashtbl[i].spin);
	}
	nclockwarn = 5 * hz;
}
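/*
 * Illustrative note (an assumption about hashinit_ext(), not stated in
 * this file): it returns a power-of-2 sized table and stores the
 * size-minus-one mask in nchash, which is why the loops above iterate
 * with i <= nchash and why a hash value can be reduced to a chain with
 * a simple mask, e.g.:
 *
 *	nchpp = &nchashtbl[hash & nchash];
 */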
/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record.
 */
void
cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
{
	nch->ncp = cache_alloc(0);
	nch->mount = mp;
	atomic_add_int(&mp->mnt_refs, 1);
	if (vp)
		_cache_setvp(nch->mount, nch->ncp, vp);
}
/*
 * vfs_cache_setroot()
 *
 *	Create an association between the root of our namecache and
 *	the root vnode.  This routine may be called several times during
 *	booting.
 *
 *	If the caller intends to save the returned namecache pointer somewhere
 *	it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
{
	struct vnode *ovp;
	struct nchandle onch;

	ovp = rootvnode;
	onch = rootnch;
	rootvnode = nvp;
	if (nch)
		rootnch = *nch;
	else
		cache_zero(&rootnch);
	if (ovp)
		vrele(ovp);
	if (onch.ncp)
		cache_drop(&onch);
}
/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.  The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}
/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct nchash_head *nchpp;
	struct namecache *ncp, *nnp;

	/*
	 * Scan hash tables for applicable entries.
	 */
	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
		spin_lock_wr(&nchpp->spin);	/* XXX */
		ncp = LIST_FIRST(&nchpp->list);
		if (ncp)
			_cache_hold(ncp);
		while (ncp) {
			/*
			 * Get the next entry before cleaning the current.
			 */
			nnp = LIST_NEXT(ncp, nc_hash);
			if (nnp)
				_cache_hold(nnp);
			if (ncp->nc_mount == mp) {
				_cache_lock(ncp);
				ncp = cache_zap(ncp, 0);
				if (ncp)
					_cache_drop(ncp);
			} else {
				_cache_drop(ncp);
			}
			ncp = nnp;
		}
		spin_unlock_wr(&nchpp->spin);	/* XXX */
	}
}
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");

static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
int
sys___getcwd(struct __getcwd_args *uap)
{
	u_int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen == 0)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, buflen, &error);
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	kfree(buf, M_TEMP);
	return (error);
}
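/*
 * Illustrative sketch of the kern_getcwd() contract (example_kern_getcwd
 * is not part of this file): the path is built at the *end* of the
 * supplied buffer and a pointer into it is returned, which is why
 * sys___getcwd() above copies out from bp rather than from buf.
 */
#if 0
static void
example_kern_getcwd(void)
{
	char *buf, *bp;
	int error;

	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, MAXPATHLEN, &error);
	if (error == 0)
		kprintf("cwd = %s\n", bp);	/* bp points into buf */
	kfree(buf, M_TEMP);
}
#endif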
char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct nchandle nch;
	struct namecache *ncp;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	nch = fdp->fd_ncdir;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);

	while (ncp && (ncp != fdp->fd_nrdir.ncp ||
	       nch.mount != fdp->fd_nrdir.mount)
	) {
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point
		 * in the underlying filesystem.
		 */
		if (ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfail4++;
				*error = ERANGE;
				bp = NULL;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfail4++;
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			_cache_lock(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}
	if (ncp == NULL) {
		numcwdfail2++;
		*error = ENOENT;
		bp = NULL;
		goto done;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfail4++;
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
done:
	if (ncp)
		_cache_drop(ncp);
	return (bp);
}
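/*
 * Illustrative sketch of the reverse-construction technique
 * kern_getcwd() uses above (example_prepend is not part of this file).
 * Components are discovered leaf-to-root, so each segment is written
 * backwards from the end of the buffer and the finished string starts
 * wherever bp ends up.
 */
#if 0
static char *
example_prepend(char *buf, char *bp, const char *name, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		if (bp == buf)
			return (NULL);	/* buffer exhausted: ERANGE */
		*--bp = name[i];
	}
	if (bp == buf)
		return (NULL);
	*--bp = '/';
	return (bp);
}
#endif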
/*
 * Thus begins the fullpath magic.
 *
 * The passed nchp is referenced but not locked.
 */
#undef STATNODE
#define STATNODE(name) \
	static u_int name; \
	SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")

static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
	   &disablefullpath, 0, "");

STATNODE(numfullpathcalls);
STATNODE(numfullpathfail1);
STATNODE(numfullpathfail2);
STATNODE(numfullpathfail3);
STATNODE(numfullpathfail4);
STATNODE(numfullpathfound);
int
cache_fullpath(struct proc *p, struct nchandle *nchp,
	       char **retbuf, char **freebuf)
{
	struct nchandle fd_nrdir;
	struct nchandle nch;
	struct namecache *ncp;
	struct mount *mp;
	char *buf;
	char *bp;
	int slash_prefixed;
	int error = 0;
	int i;

	atomic_add_int(&numfullpathcalls, -1);

	*retbuf = NULL;
	*freebuf = NULL;

	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	fd_nrdir = p->p_fd->fd_nrdir;
	slash_prefixed = 0;
	nch = *nchp;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);
	mp = nch.mount;

	while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point.
		 */
		if (ncp == mp->mnt_ncmountpt.ncp) {
			nch = mp->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			mp = nch.mount;
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfail4++;
				kfree(buf, M_TEMP);
				error = ENOMEM;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 *
		 * We can only safely access nc_parent with ncp held locked.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			_cache_lock(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}
	if (ncp == NULL) {
		numfullpathfail2++;
		kfree(buf, M_TEMP);
		error = ENOENT;
		goto done;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfail4++;
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*freebuf = buf;
	error = 0;
done:
	if (ncp)
		_cache_drop(ncp);
	return(error);
}
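/*
 * Illustrative sketch of the retbuf/freebuf contract (example_fullpath
 * is not part of this file).  retbuf points into the middle of the
 * allocation, so callers must free freebuf (the allocation base),
 * never retbuf.
 */
#if 0
static void
example_fullpath(struct proc *p, struct nchandle *nch)
{
	char *retbuf, *freebuf;

	if (cache_fullpath(p, nch, &retbuf, &freebuf) == 0) {
		kprintf("path = %s\n", retbuf);
		kfree(freebuf, M_TEMP);
	}
}
#endif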
int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
{
	struct namecache *ncp;
	struct nchandle nch;
	int error;

	atomic_add_int(&numfullpathcalls, 1);
	if (disablefullpath)
		return (ENODEV);

	if (p == NULL)
		return (EINVAL);

	/* vn is NULL, client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	spin_lock_wr(&vn->v_spinlock);
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL) {
		spin_unlock_wr(&vn->v_spinlock);
		return (EINVAL);
	}
	_cache_hold(ncp);
	spin_unlock_wr(&vn->v_spinlock);

	atomic_add_int(&numfullpathcalls, -1);
	nch.ncp = ncp;
	nch.mount = vn->v_mount;
	error = cache_fullpath(p, &nch, retbuf, freebuf);