/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/spinlock.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <sys/sysref2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#define MAX_RECURSION_DEPTH	64
/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).  Each hash chain has its own spin lock.
 *
 * Negative entries may exist and correspond to resolved namecache
 * structures where nc_vp is NULL.  In a negative entry, NCF_WHITEOUT
 * will be set if the entry corresponds to a whited-out directory entry
 * (versus simply not finding the entry at all).  ncneglist is locked
 * with a global spinlock (ncspin).
 *
 * Rules:
 *
 * (1) A ncp must be referenced before it can be locked.
 *
 * (2) A ncp must be locked in order to modify it.
 *
 * (3) ncp locks are always ordered child -> parent.  That may seem
 *     backwards but forward scans use the hash table and thus can hold
 *     the parent unlocked when traversing downward.  See the sketch
 *     after this comment.
 *
 *     This allows insert/rename/delete/dot-dot and other operations
 *     to use ncp->nc_parent links.
 *
 *     This also prevents a locked up e.g. NFS node from creating a
 *     chain reaction all the way back to the root vnode / namecache.
 *
 * (4) parent linkages require both the parent and child to be locked.
 */
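/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * rule (3) in practice.  A caller holding a referenced child that needs to
 * touch parent linkages locks the child first and the parent second, the
 * same order _cache_unlink_parent() uses below.  "ncp" and "par" are
 * hypothetical locals:
 *
 *	_cache_hold(ncp);			// child referenced
 *	_cache_lock(ncp);			// child locked first
 *	if ((par = ncp->nc_parent) != NULL) {
 *		_cache_hold(par);
 *		_cache_lock(par);		// parent locked second
 *		// ... modify linkages ...
 *		_cache_unlock(par);
 *		_cache_drop(par);
 *	}
 *	_cache_unlock(ncp);
 *	_cache_drop(ncp);
 */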
/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)		(&nchashtbl[(hash) & nchash])
#define MINNEG			1024
#define MINPOS			1024
MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

LIST_HEAD(nchash_list, namecache);

struct nchash_head {
        struct nchash_list list;
        struct spinlock spin;
};

static struct nchash_head       *nchashtbl;
static struct namecache_list    ncneglist;
static struct spinlock          ncspin;
/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code run as if the parent vnode did not
 *	have a namecache record, even if it does have one.
 */
static int ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0,
    "Namecache debug level (0-3)");
static u_long nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
    "Size of namecache hash table");
static int ncnegfactor = 16;		/* ratio of negative entries */
SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
    "Ratio of namecache negative entries");
static int nclockwarn;			/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0,
    "Warn on locked namecache entries in ticks");
static int numdefered;			/* number of deferred-zap entries */
SYSCTL_INT(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0,
    "Number of deferred-zap namecache entries");

static int ncposlimit;			/* limit on positive entries */
SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0,
    "Limit on positive namecache entries");
SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode),
    "sizeof(struct vnode)");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache),
    "sizeof(struct namecache)");
static int cache_resolve_mp(struct mount *mp);
static struct vnode *cache_dvpref(struct namecache *ncp);
static void _cache_lock(struct namecache *ncp);
static void _cache_setunresolved(struct namecache *ncp);
static void _cache_cleanneg(int count);
static void _cache_cleanpos(int count);
static void _cache_cleandefered(void);
static void _cache_unlink(struct namecache *ncp);
/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
static int numneg;
SYSCTL_INT(_vfs_cache, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0,
    "Number of negative namecache entries");
static int numcache;
SYSCTL_INT(_vfs_cache, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0,
    "Number of namecache entries");
static u_long numcalls;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcalls, CTLFLAG_RD, &numcalls, 0,
    "Number of namecache lookups");
static u_long numchecks;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numchecks, CTLFLAG_RD, &numchecks, 0,
    "Number of checked entries in namecache lookups");

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * effectiveness.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
        struct globaldata *gd;
        int i, error;

        error = 0;
        for (i = 0; i < ncpus; ++i) {
                gd = globaldata_find(i);
                if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
                                        sizeof(struct nchstats))))
                        break;
        }

        return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");

static struct namecache *cache_zap(struct namecache *ncp, int nonblock);
/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * The primary lock field is nc_exlocks.  nc_locktd is set after the
 * fact (when locking) or cleared prior to unlocking.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 *	     or recycled, but it does NOT help you if the vnode had already
 *	     initiated a recyclement.  If this is important, use cache_get()
 *	     rather than cache_lock() (and deal with the differences in the
 *	     way the refs counter is handled).  Or, alternatively, make an
 *	     unconditional call to cache_validate() or cache_resolve()
 *	     after cache_lock() returns.
 */
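/*
 * Illustrative sketch (editor's addition): the pattern the WARNING above
 * recommends when the caller needs a definitively usable vnode.
 * cache_get() re-checks VRECLAIMED and unresolves the entry, while a bare
 * cache_lock() does not.  "nch" is a hypothetical referenced handle:
 *
 *	struct nchandle nch2;
 *
 *	cache_get(&nch, &nch2);		// ref + lock, re-validates nc_vp
 *	// nch2.ncp is now either definitively usable or unresolved
 *	cache_put(&nch2);		// unlock + deref
 */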
static
void
_cache_lock(struct namecache *ncp)
{
        thread_t td;
        int didwarn;
        int error;
        u_int count;

        KKASSERT(ncp->nc_refs != 0);
        didwarn = 0;
        td = curthread;

        for (;;) {
                count = ncp->nc_exlocks;

                if (count == 0) {
                        if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
                                /*
                                 * The vp associated with a locked ncp must
                                 * be held to prevent it from being recycled.
                                 *
                                 * WARNING!  If VRECLAIMED is set the vnode
                                 * could already be in the middle of a recycle.
                                 * Callers must use cache_vref() or
                                 * cache_vget() on the locked ncp to
                                 * validate the vp or set the cache entry
                                 * to unresolved.
                                 *
                                 * NOTE! vhold() is allowed if we hold a
                                 *	 lock on the ncp (which we do).
                                 */
                                ncp->nc_locktd = td;
                                if (ncp->nc_vp)
                                        vhold(ncp->nc_vp);	/* MPSAFE */
                                break;
                        }
                        /* cmpset failed, retry */
                        continue;
                }
                if (ncp->nc_locktd == td) {
                        if (atomic_cmpset_int(&ncp->nc_exlocks, count,
                                              count + 1)) {
                                break;
                        }
                        /* cmpset failed, retry */
                        continue;
                }
                tsleep_interlock(ncp, 0);
                if (atomic_cmpset_int(&ncp->nc_exlocks, count,
                                      count | NC_EXLOCK_REQ) == 0) {
                        /* cmpset failed, retry */
                        continue;
                }
                error = tsleep(ncp, PINTERLOCKED, "clock", nclockwarn);
                if (error == EWOULDBLOCK) {
                        if (didwarn == 0) {
                                didwarn = ticks;
                                kprintf("[diagnostic] cache_lock: blocked "
                                        "on %p", ncp);
                                kprintf(" \"%*.*s\"\n",
                                        ncp->nc_nlen, ncp->nc_nlen,
                                        ncp->nc_name);
                        }
                }
        }
        if (didwarn) {
                kprintf("[diagnostic] cache_lock: unblocked %*.*s after "
                        "%d secs\n",
                        ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
                        (int)(ticks - didwarn) / hz);
        }
}
/*
 * NOTE: nc_refs may be zero if the ncp is interlocked by circumstance,
 *	 such as the case where one of its children is locked.
 */
static
int
_cache_lock_nonblock(struct namecache *ncp)
{
        thread_t td;
        u_int count;

        td = curthread;

        for (;;) {
                count = ncp->nc_exlocks;

                if (count == 0) {
                        if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
                                /*
                                 * The vp associated with a locked ncp must
                                 * be held to prevent it from being recycled.
                                 *
                                 * WARNING!  If VRECLAIMED is set the vnode
                                 * could already be in the middle of a recycle.
                                 * Callers must use cache_vref() or
                                 * cache_vget() on the locked ncp to
                                 * validate the vp or set the cache entry
                                 * to unresolved.
                                 *
                                 * NOTE! vhold() is allowed if we hold a
                                 *	 lock on the ncp (which we do).
                                 */
                                ncp->nc_locktd = td;
                                if (ncp->nc_vp)
                                        vhold(ncp->nc_vp);	/* MPSAFE */
                                break;
                        }
                        /* cmpset failed, retry */
                        continue;
                }
                if (ncp->nc_locktd == td) {
                        if (atomic_cmpset_int(&ncp->nc_exlocks, count,
                                              count + 1)) {
                                break;
                        }
                        /* cmpset failed, retry */
                        continue;
                }
                return(EWOULDBLOCK);
        }
        return(0);
}
/*
 * NOTE: nc_refs can be 0 (degenerate case during _cache_drop).
 *
 *	 nc_locktd must be NULLed out prior to nc_exlocks getting cleared.
 */
static
void
_cache_unlock(struct namecache *ncp)
{
        thread_t td __debugvar = curthread;
        u_int count;

        KKASSERT(ncp->nc_refs >= 0);
        KKASSERT(ncp->nc_exlocks > 0);
        KKASSERT(ncp->nc_locktd == td);

        count = ncp->nc_exlocks;
        if ((count & ~NC_EXLOCK_REQ) == 1) {
                ncp->nc_locktd = NULL;
                if (ncp->nc_vp)
                        vdrop(ncp->nc_vp);
        }
        for (;;) {
                if ((count & ~NC_EXLOCK_REQ) == 1) {
                        if (atomic_cmpset_int(&ncp->nc_exlocks, count, 0)) {
                                if (count & NC_EXLOCK_REQ)
                                        wakeup(ncp);
                                break;
                        }
                } else {
                        if (atomic_cmpset_int(&ncp->nc_exlocks, count,
                                              count - 1)) {
                                break;
                        }
                }
                count = ncp->nc_exlocks;
        }
}
/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't ourselves.
 */
static __inline
void
_cache_hold(struct namecache *ncp)
{
        atomic_add_int(&ncp->nc_refs, 1);
}
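/*
 * Illustrative sketch (editor's addition): typical external pairing.
 * The handle must already carry at least one reference before
 * cache_hold() may be called from outside this module:
 *
 *	cache_hold(&nch);	// +1 ref, no lock, no resolution
 *	// ... namespace operations by others may still occur ...
 *	cache_drop(&nch);	// -1 ref, may zap on the 1->0 transition
 */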
/*
 * Drop a cache entry, taking care to deal with races.
 *
 * For potential 1->0 transitions we must hold the ncp lock to safely
 * test its flags.  An unresolved entry with no children must be zapped
 * to avoid leaks.
 *
 * The call to cache_zap() itself will handle all remaining races and
 * will decrement the ncp's refs regardless.  If we are resolved or
 * have children nc_refs can safely be dropped to 0 without having to
 * zap the entry.
 *
 * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion.
 *
 * NOTE: cache_zap() may return a non-NULL referenced parent which must
 *	 be dropped in a loop.
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
        int refs;

        while (ncp) {
                KKASSERT(ncp->nc_refs > 0);
                refs = ncp->nc_refs;

                if (refs == 1) {
                        if (_cache_lock_nonblock(ncp) == 0) {
                                ncp->nc_flag &= ~NCF_DEFEREDZAP;
                                if ((ncp->nc_flag & NCF_UNRESOLVED) &&
                                    TAILQ_EMPTY(&ncp->nc_list)) {
                                        ncp = cache_zap(ncp, 1);
                                        continue;
                                }
                                if (atomic_cmpset_int(&ncp->nc_refs, 1, 0)) {
                                        _cache_unlock(ncp);
                                        break;
                                }
                                _cache_unlock(ncp);
                        }
                } else {
                        if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1))
                                break;
                }
        }
}
/*
 * Link a new namecache entry to its parent and to the hash table.  Be
 * careful to avoid races if vhold() blocks in the future.
 *
 * Both ncp and par must be referenced and locked.
 *
 * NOTE: The hash table spinlock is likely held during this call, we
 *	 can't do anything fancy.
 */
static void
_cache_link_parent(struct namecache *ncp, struct namecache *par,
                   struct nchash_head *nchpp)
{
        KKASSERT(ncp->nc_parent == NULL);
        ncp->nc_parent = par;
        ncp->nc_head = nchpp;

        /*
         * Set inheritance flags.  Note that the parent flags may be
         * stale due to getattr potentially not having been run yet
         * (it gets run during nlookup()'s).
         */
        ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
        if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
                ncp->nc_flag |= NCF_SF_PNOCACHE;
        if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
                ncp->nc_flag |= NCF_UF_PCACHE;

        LIST_INSERT_HEAD(&nchpp->list, ncp, nc_hash);

        if (TAILQ_EMPTY(&par->nc_list)) {
                TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
                /*
                 * Any vp associated with an ncp which has children must
                 * be held to prevent it from being recycled.
                 */
                if (par->nc_vp)
                        vhold(par->nc_vp);
        } else {
                TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
        }
}
/*
 * Remove the parent and hash associations from a namecache structure.
 * If this is the last child of the parent the cache_drop(par) will
 * attempt to recursively zap the parent.
 *
 * ncp must be locked.  This routine will acquire a temporary lock on
 * the parent as well as the appropriate hash chain.
 */
static void
_cache_unlink_parent(struct namecache *ncp)
{
        struct namecache *par;
        struct vnode *dropvp;

        dropvp = NULL;
        if ((par = ncp->nc_parent) != NULL) {
                KKASSERT(ncp->nc_parent == par);
                _cache_hold(par);
                _cache_lock(par);
                spin_lock(&ncp->nc_head->spin);
                LIST_REMOVE(ncp, nc_hash);
                TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
                if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
                        dropvp = par->nc_vp;
                spin_unlock(&ncp->nc_head->spin);
                ncp->nc_parent = NULL;
                ncp->nc_head = NULL;
                _cache_unlock(par);
                _cache_drop(par);

                /*
                 * We can only safely vdrop with no spinlocks held.
                 */
                if (dropvp)
                        vdrop(dropvp);
        }
}
/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 */
static struct namecache *
cache_alloc(int nlen)
{
        struct namecache *ncp;

        ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
        if (nlen)
                ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
        ncp->nc_nlen = nlen;
        ncp->nc_flag = NCF_UNRESOLVED;
        ncp->nc_error = ENOTCONN;	/* needs to be resolved */
        ncp->nc_refs = 1;

        TAILQ_INIT(&ncp->nc_list);
        _cache_lock(ncp);
        return(ncp);
}
/*
 * Can only be called for the case where the ncp has never been
 * associated with anything (so no spinlocks are needed).
 */
static void
_cache_free(struct namecache *ncp)
{
        KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
        if (ncp->nc_name)
                kfree(ncp->nc_name, M_VFSCACHE);
        kfree(ncp, M_VFSCACHE);
}
void
cache_zero(struct nchandle *nch)
{
        nch->ncp = NULL;
        nch->mount = NULL;
}
/*
 * Ref and deref a namecache structure.
 *
 * The caller must specify a stable ncp pointer, typically meaning the
 * ncp is already referenced but this can also occur indirectly through
 * e.g. holding a lock on a direct child.
 *
 * WARNING: Caller may hold an unrelated read spinlock, which means we can't
 *	    use read spinlocks here.
 */
void
cache_hold(struct nchandle *nch)
{
        _cache_hold(nch->ncp);
        atomic_add_int(&nch->mount->mnt_refs, 1);
}
/*
 * Create a copy of a namecache handle for an already-referenced
 * handle.
 */
void
cache_copy(struct nchandle *nch, struct nchandle *target)
{
        *target = *nch;
        _cache_hold(target->ncp);
        atomic_add_int(&nch->mount->mnt_refs, 1);
}
void
cache_changemount(struct nchandle *nch, struct mount *mp)
{
        atomic_add_int(&nch->mount->mnt_refs, -1);
        nch->mount = mp;
        atomic_add_int(&nch->mount->mnt_refs, 1);
}
void
cache_drop(struct nchandle *nch)
{
        atomic_add_int(&nch->mount->mnt_refs, -1);
        _cache_drop(nch->ncp);
        nch->ncp = NULL;
        nch->mount = NULL;
}
void
cache_lock(struct nchandle *nch)
{
        _cache_lock(nch->ncp);
}
/*
 * Relock nch1 given an unlocked nch1 and a locked nch2.  The caller
 * is responsible for checking both for validity on return as they
 * may have become invalid.
 *
 * We have to deal with potential deadlocks here, just ping pong
 * the lock until we get it (we will always block somewhere when
 * looping so this is not cpu-intensive).
 *
 * which = 0	nch1 not locked, nch2 is locked
 * which = 1	nch1 is locked, nch2 is not locked
 */
void
cache_relock(struct nchandle *nch1, struct ucred *cred1,
             struct nchandle *nch2, struct ucred *cred2)
{
        int which;

        which = 0;

        for (;;) {
                if (which == 0) {
                        if (cache_lock_nonblock(nch1) == 0) {
                                cache_resolve(nch1, cred1);
                                break;
                        }
                        cache_unlock(nch2);
                        cache_lock(nch1);
                        cache_resolve(nch1, cred1);
                        which = 1;
                } else {
                        if (cache_lock_nonblock(nch2) == 0) {
                                cache_resolve(nch2, cred2);
                                break;
                        }
                        cache_unlock(nch1);
                        cache_lock(nch2);
                        cache_resolve(nch2, cred2);
                        which = 0;
                }
        }
}
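/*
 * Illustrative sketch (editor's addition): a hypothetical caller holding
 * tnch locked (say, a rename target) that must also lock fnch without
 * risking an ABBA deadlock lets cache_relock() ping-pong the locks:
 *
 *	cache_relock(&fnch, cred, &tnch, cred);
 *	// both handles are now locked and have been re-resolved, but
 *	// either may have become invalid and must be re-checked here
 */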
int
cache_lock_nonblock(struct nchandle *nch)
{
        return(_cache_lock_nonblock(nch->ncp));
}
void
cache_unlock(struct nchandle *nch)
{
        _cache_unlock(nch->ncp);
}
/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.
 *
 * We want cache_get() to return a definitively usable vnode or a
 * definitively unresolved ncp.
 */
static
struct namecache *
_cache_get(struct namecache *ncp)
{
        _cache_hold(ncp);
        _cache_lock(ncp);
        if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
                _cache_setunresolved(ncp);
        return(ncp);
}
/*
 * This is a special form of _cache_lock() which only succeeds if
 * it can get a pristine, non-recursive lock.  The caller must have
 * already ref'd the ncp.
 *
 * On success the ncp will be locked, on failure it will not.  The
 * ref count does not change either way.
 *
 * We want _cache_lock_special() (on success) to return a definitively
 * usable vnode or a definitively unresolved ncp.
 */
static int
_cache_lock_special(struct namecache *ncp)
{
        if (_cache_lock_nonblock(ncp) == 0) {
                if ((ncp->nc_exlocks & ~NC_EXLOCK_REQ) == 1) {
                        if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
                                _cache_setunresolved(ncp);
                        return(0);
                }
                _cache_unlock(ncp);
        }
        return(EWOULDBLOCK);
}
/*
 * NOTE: The same nchandle can be passed for both arguments.
 */
void
cache_get(struct nchandle *nch, struct nchandle *target)
{
        KKASSERT(nch->ncp->nc_refs > 0);
        target->mount = nch->mount;
        target->ncp = _cache_get(nch->ncp);
        atomic_add_int(&target->mount->mnt_refs, 1);
}
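/*
 * Illustrative sketch (editor's addition): because the same nchandle may
 * be passed for both arguments, a referenced, unlocked handle can be
 * upgraded in place:
 *
 *	cache_get(&nch, &nch);		// +1 ref and lock nch in place
 *	// ...
 *	cache_put(&nch);		// undo both
 */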
static __inline
void
_cache_put(struct namecache *ncp)
{
        _cache_unlock(ncp);
        _cache_drop(ncp);
}
void
cache_put(struct nchandle *nch)
{
        atomic_add_int(&nch->mount->mnt_refs, -1);
        _cache_put(nch->ncp);
        nch->ncp = NULL;
        nch->mount = NULL;
}
/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
static
void
_cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
{
        KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);

        if (vp != NULL) {
                /*
                 * Any vp associated with an ncp which has children must
                 * be held.  Any vp associated with a locked ncp must be held.
                 */
                if (!TAILQ_EMPTY(&ncp->nc_list))
                        vhold(vp);
                spin_lock(&vp->v_spin);
                ncp->nc_vp = vp;
                TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
                spin_unlock(&vp->v_spin);
                if (ncp->nc_exlocks)
                        vhold(vp);

                /*
                 * Set auxiliary flags
                 */
                switch(vp->v_type) {
                case VDIR:
                        ncp->nc_flag |= NCF_ISDIR;
                        break;
                case VLNK:
                        ncp->nc_flag |= NCF_ISSYMLINK;
                        /* XXX cache the contents of the symlink */
                        break;
                default:
                        break;
                }
                atomic_add_int(&numcache, 1);
                ncp->nc_error = 0;
                /* XXX: this is a hack to work-around the lack of a real pfs vfs
                 * type */
                if (mp != NULL &&
                    strncmp(mp->mnt_stat.f_fstypename, "null", 5) == 0)
                        vp->v_pfsmp = mp;
        } else {
                /*
                 * When creating a negative cache hit we set the
                 * namecache_gen.  A later resolve will clean out the
                 * negative cache hit if the mount point's namecache_gen
                 * has changed.  Used by devfs, could also be used by
                 * other remote FSs.
                 */
                ncp->nc_vp = NULL;
                spin_lock(&ncspin);
                TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
                ++numneg;
                spin_unlock(&ncspin);
                ncp->nc_error = ENOENT;
                if (mp)
                        VFS_NCPGEN_SET(mp, ncp);
        }
        ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);
}
void
cache_setvp(struct nchandle *nch, struct vnode *vp)
{
        _cache_setvp(nch->mount, nch->ncp, vp);
}
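/*
 * Illustrative sketch (editor's addition): how a resolver typically
 * finishes.  A NULL vp records a negative hit (nc_error becomes ENOENT),
 * a non-NULL vp records a positive hit.  "vp" and "found" are
 * hypothetical:
 *
 *	if (found)
 *		cache_setvp(nch, vp);	// positive entry
 *	else
 *		cache_setvp(nch, NULL);	// negative entry
 */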
void
cache_settimeout(struct nchandle *nch, int nticks)
{
        struct namecache *ncp = nch->ncp;

        if ((ncp->nc_timeout = ticks + nticks) == 0)
                ncp->nc_timeout = 1;
}
/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 */
static
void
_cache_setunresolved(struct namecache *ncp)
{
        struct vnode *vp;

        if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
                ncp->nc_flag |= NCF_UNRESOLVED;
                ncp->nc_timeout = 0;
                ncp->nc_error = ENOTCONN;
                if ((vp = ncp->nc_vp) != NULL) {
                        atomic_add_int(&numcache, -1);
                        spin_lock(&vp->v_spin);
                        ncp->nc_vp = NULL;
                        TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
                        spin_unlock(&vp->v_spin);

                        /*
                         * Any vp associated with an ncp with children is
                         * held by that ncp.  Any vp associated with a locked
                         * ncp is held by that ncp.  These conditions must be
                         * undone when the vp is cleared out from the ncp.
                         */
                        if (!TAILQ_EMPTY(&ncp->nc_list))
                                vdrop(vp);
                        if (ncp->nc_exlocks)
                                vdrop(vp);
                } else {
                        spin_lock(&ncspin);
                        TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
                        --numneg;
                        spin_unlock(&ncspin);
                }
                ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
        }
}
/*
 * The cache_nresolve() code calls this function to automatically
 * set a resolved cache element to unresolved if it has timed out
 * or if it is a negative cache hit and the mount point namecache_gen
 * has changed.
 */
static __inline void
_cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
{
        /*
         * Already in an unresolved state, nothing to do.
         */
        if (ncp->nc_flag & NCF_UNRESOLVED)
                return;

        /*
         * Try to zap entries that have timed out.  We have
         * to be careful here because locked leafs may depend
         * on the vnode remaining intact in a parent, so only
         * do this under very specific conditions.
         */
        if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
            TAILQ_EMPTY(&ncp->nc_list)) {
                _cache_setunresolved(ncp);
                return;
        }

        /*
         * If a resolved negative cache hit is invalid due to
         * the mount's namecache generation being bumped, zap it.
         */
        if (ncp->nc_vp == NULL && VFS_NCPGEN_TEST(mp, ncp)) {
                _cache_setunresolved(ncp);
        }
}
void
cache_setunresolved(struct nchandle *nch)
{
        _cache_setunresolved(nch->ncp);
}
/*
 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
 * looking for matches.  This flag tells the lookup code when it must
 * check for a mount linkage and also prevents the directories in question
 * from being deleted or renamed.
 */
static
int
cache_clrmountpt_callback(struct mount *mp, void *data)
{
        struct nchandle *nch = data;

        if (mp->mnt_ncmounton.ncp == nch->ncp)
                return(1);
        if (mp->mnt_ncmountpt.ncp == nch->ncp)
                return(1);
        return(0);
}

void
cache_clrmountpt(struct nchandle *nch)
{
        int count;

        count = mountlist_scan(cache_clrmountpt_callback, nch,
                               MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
        if (count == 0)
                nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
}
/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be referenced and locked.  The routine may unlock
 * and relock ncp several times, and will recheck the children and loop
 * to catch races.  When done the passed ncp will be returned with the
 * reference and lock intact.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact, but
 * the nodes will be marked as having been destroyed and will be set
 * to an unresolved state.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* not have been reresolved.
 *
 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
 * have to avoid blowing out the kernel stack.  We do this by saving the
 * deep namecache node and aborting the recursion, then re-recursing at that
 * node using a depth-first algorithm in order to allow multiple deep
 * recursions to chain through each other, then we restart the invalidation
 * from scratch.
 */
struct cinvtrack {
        struct namecache *resume_ncp;
        int depth;
};

static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);
static
int
_cache_inval(struct namecache *ncp, int flags)
{
        struct cinvtrack track;
        struct namecache *ncp2;
        int r;

        track.depth = 0;
        track.resume_ncp = NULL;

        for (;;) {
                r = _cache_inval_internal(ncp, flags, &track);
                if (track.resume_ncp == NULL)
                        break;
                kprintf("Warning: deep namecache recursion at %s\n",
                        ncp->nc_name);
                _cache_unlock(ncp);
                while ((ncp2 = track.resume_ncp) != NULL) {
                        track.resume_ncp = NULL;
                        _cache_lock(ncp2);
                        _cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
                                              &track);
                        _cache_put(ncp2);
                }
                _cache_lock(ncp);
        }
        return(r);
}
int
cache_inval(struct nchandle *nch, int flags)
{
        return(_cache_inval(nch->ncp, flags));
}
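/*
 * Illustrative sketch (editor's addition): a directory removal would
 * typically both flag the entry destroyed and unresolve its children,
 * while a plain invalidation leaves the name usable for re-resolution:
 *
 *	cache_inval(&nch, CINV_DESTROY | CINV_CHILDREN);	// rmdir-style
 *	cache_inval(&nch, 0);					// re-resolve later
 */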
/*
 * Helper for _cache_inval().  The passed ncp is refd and locked and
 * remains that way on return, but may be unlocked/relocked multiple
 * times by the routine.
 */
static int
_cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
{
        struct namecache *kid;
        struct namecache *nextkid;
        int rcnt = 0;

        KKASSERT(ncp->nc_exlocks);

        _cache_setunresolved(ncp);
        if (flags & CINV_DESTROY)
                ncp->nc_flag |= NCF_DESTROYED;
        if ((flags & CINV_CHILDREN) &&
            (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
        ) {
                _cache_hold(kid);
                if (++track->depth > MAX_RECURSION_DEPTH) {
                        track->resume_ncp = ncp;
                        _cache_hold(ncp);
                        ++rcnt;
                }
                _cache_unlock(ncp);
                while (kid) {
                        if (track->resume_ncp) {
                                _cache_drop(kid);
                                break;
                        }
                        if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
                                _cache_hold(nextkid);
                        if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
                            TAILQ_FIRST(&kid->nc_list)
                        ) {
                                _cache_lock(kid);
                                rcnt += _cache_inval_internal(kid,
                                                flags & ~CINV_DESTROY, track);
                                _cache_unlock(kid);
                        }
                        _cache_drop(kid);
                        kid = nextkid;
                }
                --track->depth;
                _cache_lock(ncp);
        }

        /*
         * Someone could have gotten in there while ncp was unlocked,
         * retry if so.
         */
        if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
                ++rcnt;
        return (rcnt);
}
/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
 *	 be ripped out of the topology while held, the vnode's v_namecache
 *	 list has no such restriction.  NCP's can be ripped out of the list
 *	 at virtually any time if not locked, even if held.
 *
 *	 In addition, the v_namecache list itself must be locked via
 *	 the vnode's spinlock.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
        struct namecache *ncp;
        struct namecache *next;

restart:
        spin_lock(&vp->v_spin);
        ncp = TAILQ_FIRST(&vp->v_namecache);
        if (ncp)
                _cache_hold(ncp);
        while (ncp) {
                /* loop entered with ncp held and vp spin-locked */
                if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
                        _cache_hold(next);
                spin_unlock(&vp->v_spin);
                _cache_lock(ncp);
                if (ncp->nc_vp != vp) {
                        kprintf("Warning: cache_inval_vp: race-A detected on "
                                "%s\n", ncp->nc_name);
                        _cache_put(ncp);
                        if (next)
                                _cache_drop(next);
                        goto restart;
                }
                _cache_inval(ncp, flags);
                _cache_put(ncp);		/* also releases reference */
                ncp = next;
                spin_lock(&vp->v_spin);
                if (ncp && ncp->nc_vp != vp) {
                        spin_unlock(&vp->v_spin);
                        kprintf("Warning: cache_inval_vp: race-B detected on "
                                "%s\n", ncp->nc_name);
                        _cache_drop(ncp);
                        goto restart;
                }
        }
        spin_unlock(&vp->v_spin);
        return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}
/*
 * This routine is used instead of the normal cache_inval_vp() when we
 * are trying to recycle otherwise good vnodes.
 *
 * Return 0 on success, non-zero if not all namecache records could be
 * disassociated from the vnode (for various reasons).
 */
int
cache_inval_vp_nonblock(struct vnode *vp)
{
        struct namecache *ncp;
        struct namecache *next;

        spin_lock(&vp->v_spin);
        ncp = TAILQ_FIRST(&vp->v_namecache);
        if (ncp)
                _cache_hold(ncp);
        while (ncp) {
                /* loop entered with ncp held */
                if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
                        _cache_hold(next);
                spin_unlock(&vp->v_spin);
                if (_cache_lock_nonblock(ncp)) {
                        _cache_drop(ncp);
                        if (next)
                                _cache_drop(next);
                        goto done;
                }
                if (ncp->nc_vp != vp) {
                        kprintf("Warning: cache_inval_vp: race-A detected on "
                                "%s\n", ncp->nc_name);
                        _cache_put(ncp);
                        if (next)
                                _cache_drop(next);
                        goto done;
                }
                _cache_inval(ncp, 0);
                _cache_put(ncp);		/* also releases reference */
                ncp = next;
                spin_lock(&vp->v_spin);
                if (ncp && ncp->nc_vp != vp) {
                        spin_unlock(&vp->v_spin);
                        kprintf("Warning: cache_inval_vp: race-B detected on "
                                "%s\n", ncp->nc_name);
                        _cache_drop(ncp);
                        goto done;
                }
        }
        spin_unlock(&vp->v_spin);
done:
        return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}
/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  The target ncp is destroyed (as a normal rename-over
 * would destroy the target file or directory).
 *
 * Because there may be references to the source ncp we cannot copy its
 * contents to the target.  Instead the source ncp is relinked as the target
 * and the target ncp is removed from the namecache topology.
 */
void
cache_rename(struct nchandle *fnch, struct nchandle *tnch)
{
        struct namecache *fncp = fnch->ncp;
        struct namecache *tncp = tnch->ncp;
        struct namecache *tncp_par;
        struct nchash_head *nchpp;
        u_int32_t hash;
        char *oname;
        char *nname;

        if (tncp->nc_nlen) {
                nname = kmalloc(tncp->nc_nlen + 1, M_VFSCACHE, M_WAITOK);
                bcopy(tncp->nc_name, nname, tncp->nc_nlen);
                nname[tncp->nc_nlen] = 0;
        } else {
                nname = NULL;
        }

        /*
         * Rename fncp (unlink)
         */
        _cache_unlink_parent(fncp);
        oname = fncp->nc_name;
        fncp->nc_name = nname;
        fncp->nc_nlen = tncp->nc_nlen;
        if (oname)
                kfree(oname, M_VFSCACHE);

        tncp_par = tncp->nc_parent;
        _cache_hold(tncp_par);
        _cache_lock(tncp_par);

        /*
         * Rename fncp (relink)
         */
        hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT);
        hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash);
        nchpp = NCHHASH(hash);

        spin_lock(&nchpp->spin);
        _cache_link_parent(fncp, tncp_par, nchpp);
        spin_unlock(&nchpp->spin);

        _cache_put(tncp_par);

        /*
         * Get rid of the overwritten tncp (unlink)
         */
        _cache_unlink(tncp);
}
/*
 * Perform actions consistent with unlinking a file.  The namecache
 * entry is marked DESTROYED so it no longer shows up in searches,
 * and will be physically deleted when the vnode goes away.
 */
void
cache_unlink(struct nchandle *nch)
{
        _cache_unlink(nch->ncp);
}

static void
_cache_unlink(struct namecache *ncp)
{
        ncp->nc_flag |= NCF_DESTROYED;
}
/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary.  The passed ncp must be referenced and locked.
 *
 * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The vget() can race a reclaim.  If this occurs we re-resolve the
 * namecache entry.
 *
 * There are numerous places in the kernel where vget() is called on a
 * vnode while one or more of its namecache entries is locked.  Releasing
 * a vnode never deadlocks against locked namecache entries (the vnode
 * will not get recycled while referenced ncp's exist).  This means we
 * can safely acquire the vnode.  In fact, we MUST NOT release the ncp
 * lock when acquiring the vp lock or we might cause a deadlock.
 */
int
cache_vget(struct nchandle *nch, struct ucred *cred,
           int lk_type, struct vnode **vpp)
{
        struct namecache *ncp;
        struct vnode *vp;
        int error;

        ncp = nch->ncp;
        KKASSERT(ncp->nc_locktd == curthread);
again:
        vp = NULL;
        if (ncp->nc_flag & NCF_UNRESOLVED)
                error = cache_resolve(nch, cred);
        else
                error = 0;

        if (error == 0 && (vp = ncp->nc_vp) != NULL) {
                error = vget(vp, lk_type);
                if (error) {
                        /*
                         * VRECLAIM race
                         */
                        if (error == ENOENT) {
                                kprintf("Warning: vnode reclaim race detected "
                                        "in cache_vget on %p (%s)\n",
                                        vp, ncp->nc_name);
                                _cache_setunresolved(ncp);
                                goto again;
                        }

                        /*
                         * Not a reclaim race, some other error.
                         */
                        KKASSERT(ncp->nc_vp == vp);
                        vp = NULL;
                } else {
                        KKASSERT(ncp->nc_vp == vp);
                        KKASSERT((vp->v_flag & VRECLAIMED) == 0);
                }
        }
        if (error == 0 && vp == NULL)
                error = ENOENT;
        *vpp = vp;
        return(error);
}
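/*
 * Illustrative sketch (editor's addition): obtaining a usable vnode from
 * a referenced, locked nchandle.  ENOENT here signals a negative hit
 * rather than a mechanical failure:
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = cache_vget(&nch, cred, LK_SHARED, &vp);
 *	if (error == 0) {
 *		// vp is referenced and locked
 *		vput(vp);
 *	}
 */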
int
cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
{
        struct namecache *ncp;
        struct vnode *vp;
        int error;

        ncp = nch->ncp;
        KKASSERT(ncp->nc_locktd == curthread);
again:
        vp = NULL;
        if (ncp->nc_flag & NCF_UNRESOLVED)
                error = cache_resolve(nch, cred);
        else
                error = 0;

        if (error == 0 && (vp = ncp->nc_vp) != NULL) {
                error = vget(vp, LK_SHARED);
                if (error) {
                        /*
                         * VRECLAIM race
                         */
                        if (error == ENOENT) {
                                kprintf("Warning: vnode reclaim race detected "
                                        "in cache_vget on %p (%s)\n",
                                        vp, ncp->nc_name);
                                _cache_setunresolved(ncp);
                                goto again;
                        }

                        /*
                         * Not a reclaim race, some other error.
                         */
                        KKASSERT(ncp->nc_vp == vp);
                        vp = NULL;
                } else {
                        KKASSERT(ncp->nc_vp == vp);
                        KKASSERT((vp->v_flag & VRECLAIMED) == 0);
                        /* caller does not want a lock */
                        vn_unlock(vp);
                }
        }
        if (error == 0 && vp == NULL)
                error = ENOENT;
        *vpp = vp;
        return(error);
}
/*
 * Return a referenced vnode representing the parent directory of
 * ncp.
 *
 * Because the caller has locked the ncp it should not be possible for
 * the parent ncp to go away.  However, the parent can unresolve its
 * dvp at any time so we must be able to acquire a lock on the parent
 * to safely access nc_vp.
 *
 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
 * so use vhold()/vdrop() while holding the lock to prevent dvp from
 * getting destroyed.
 *
 * MPSAFE - Note vhold() is allowed when dvp has 0 refs if we hold a
 *	    lock on the ncp in question..
 */
static struct vnode *
cache_dvpref(struct namecache *ncp)
{
        struct namecache *par;
        struct vnode *dvp;

        dvp = NULL;
        if ((par = ncp->nc_parent) != NULL) {
                _cache_hold(par);
                _cache_lock(par);
                if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
                        if ((dvp = par->nc_vp) != NULL)
                                vhold(dvp);
                }
                _cache_unlock(par);
                if (dvp) {
                        if (vget(dvp, LK_SHARED) == 0) {
                                vn_unlock(dvp);
                                vdrop(dvp);
                                /* return refd, unlocked dvp */
                        } else {
                                vdrop(dvp);
                                dvp = NULL;
                        }
                }
                _cache_drop(par);
        }
        return(dvp);
}
/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 *
 * To avoid underflowing the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
                                  struct vnode *dvp, char *fakename);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
                                  struct vnode **saved_dvp);
int
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
              struct nchandle *nch)
{
        struct vnode *saved_dvp;
        struct vnode *pvp;
        char *fakename;
        int error;

        nch->ncp = NULL;
        nch->mount = dvp->v_mount;
        saved_dvp = NULL;
        fakename = NULL;

        /*
         * Handle the makeit == 0 degenerate case
         */
        if (makeit == 0) {
                spin_lock(&dvp->v_spin);
                nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
                if (nch->ncp)
                        cache_hold(nch);
                spin_unlock(&dvp->v_spin);
        }

        /*
         * Loop until resolution, inside code will break out on error.
         */
        while (makeit) {
                /*
                 * Break out if we successfully acquire a working ncp.
                 */
                spin_lock(&dvp->v_spin);
                nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
                if (nch->ncp) {
                        cache_hold(nch);
                        spin_unlock(&dvp->v_spin);
                        break;
                }
                spin_unlock(&dvp->v_spin);

                /*
                 * If dvp is the root of its filesystem it should already
                 * have a namecache pointer associated with it as a side
                 * effect of the mount, but it may have been disassociated.
                 */
                if (dvp->v_flag & VROOT) {
                        nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
                        error = cache_resolve_mp(nch->mount);
                        _cache_put(nch->ncp);
                        if (ncvp_debug) {
                                kprintf("cache_fromdvp: resolve root of mount %p error %d",
                                        dvp->v_mount, error);
                        }
                        if (error) {
                                if (ncvp_debug)
                                        kprintf(" failed\n");
                                nch->ncp = NULL;
                                break;
                        }
                        if (ncvp_debug)
                                kprintf(" succeeded\n");
                        continue;
                }

                /*
                 * If we are recursed too deeply resort to an O(n^2)
                 * algorithm to resolve the namecache topology.  The
                 * resolved pvp is left referenced in saved_dvp to
                 * prevent the tree from being destroyed while we loop.
                 */
                if (makeit > 20) {
                        error = cache_fromdvp_try(dvp, cred, &saved_dvp);
                        if (error) {
                                kprintf("lookupdotdot(longpath) failed %d "
                                        "dvp %p\n", error, dvp);
                                nch->ncp = NULL;
                                break;
                        }
                        continue;
                }

                /*
                 * Get the parent directory and resolve its ncp.
                 */
                if (fakename) {
                        kfree(fakename, M_TEMP);
                        fakename = NULL;
                }
                error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
                                          &fakename);
                if (error) {
                        kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
                        break;
                }
                vn_unlock(pvp);

                /*
                 * Reuse makeit as a recursion depth counter.  On success
                 * nch will be fully referenced.
                 */
                cache_fromdvp(pvp, cred, makeit + 1, nch);
                vrele(pvp);
                if (nch->ncp == NULL)
                        break;

                /*
                 * Do an inefficient scan of pvp (embodied by ncp) to look
                 * for dvp.  This will create a namecache record for dvp on
                 * success.  We loop up to recheck on success.
                 *
                 * ncp and dvp are both held but not locked.
                 */
                error = cache_inefficient_scan(nch, cred, dvp, fakename);
                if (error) {
                        kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
                                pvp, nch->ncp->nc_name, dvp);
                        cache_drop(nch);
                        /* nch was NULLed out, reload mount */
                        nch->mount = dvp->v_mount;
                        break;
                }
                if (ncvp_debug) {
                        kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
                                pvp, nch->ncp->nc_name);
                }
                cache_drop(nch);
                /* nch was NULLed out, reload mount */
                nch->mount = dvp->v_mount;
        }

        /*
         * If nch->ncp is non-NULL it will have been held already.
         */
        if (fakename)
                kfree(fakename, M_TEMP);
        if (saved_dvp)
                vrele(saved_dvp);
        if (nch->ncp)
                return (0);
        return (EINVAL);
}
/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static
int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
                  struct vnode **saved_dvp)
{
        struct nchandle nch;
        struct vnode *pvp;
        char *fakename;
        int error;
        static time_t last_fromdvp_report;

        fakename = NULL;

        /*
         * Loop getting the parent directory vnode until we get something we
         * can resolve in the namecache.
         */
        vref(dvp);
        nch.mount = dvp->v_mount;
        nch.ncp = NULL;

        for (;;) {
                if (fakename) {
                        kfree(fakename, M_TEMP);
                        fakename = NULL;
                }
                error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
                                          &fakename);
                if (error) {
                        vrele(dvp);
                        break;
                }
                vn_unlock(pvp);
                spin_lock(&pvp->v_spin);
                if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
                        _cache_hold(nch.ncp);
                        spin_unlock(&pvp->v_spin);
                        vrele(pvp);
                        break;
                }
                spin_unlock(&pvp->v_spin);
                if (pvp->v_flag & VROOT) {
                        nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
                        error = cache_resolve_mp(nch.mount);
                        _cache_unlock(nch.ncp);
                        vrele(pvp);
                        if (error) {
                                _cache_drop(nch.ncp);
                                nch.ncp = NULL;
                                vrele(dvp);
                        }
                        break;
                }
                vrele(dvp);
                dvp = pvp;
        }
        if (error == 0) {
                if (last_fromdvp_report != time_second) {
                        last_fromdvp_report = time_second;
                        kprintf("Warning: extremely inefficient path "
                                "resolution on %s\n",
                                nch.ncp->nc_name);
                }
                error = cache_inefficient_scan(&nch, cred, dvp, fakename);

                /*
                 * Hopefully dvp now has a namecache record associated with
                 * it.  Leave it referenced to prevent the kernel from
                 * recycling the vnode.  Otherwise extremely long directory
                 * paths could result in endless recycling.
                 */
                if (*saved_dvp)
                        vrele(*saved_dvp);
                *saved_dvp = dvp;
                _cache_drop(nch.ncp);
        }
        if (fakename)
                kfree(fakename, M_TEMP);
        return (error);
}
/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged opportunistically,
 * or (2) Make the NFS server backtrack and scan to recover a connected
 * namecache topology in order to then be able to issue new API lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every subsystem
 * that calls into the namecache to deal with the re-merge case, especially
 * since we are using the namecache to placehold negative lookups and the
 * vnode might not be immediately assigned. (2) is certainly far less
 * efficient than (1), but since we are only talking about directories here
 * (which are likely to remain cached), the case does not actually run all
 * that often and has the supreme advantage of not polluting the namecache
 * algorithms.
 *
 * If a fakename is supplied just construct a namecache entry using the
 * fake name.
 */
static int
cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
                       struct vnode *dvp, char *fakename)
{
        struct nlcomponent nlc;
        struct nchandle rncp;
        struct dirent *den;
        struct vnode *pvp;
        struct vattr vat;
        struct iovec iov;
        struct uio uio;
        int blksize;
        int eofflag;
        int bytes;
        char *rbuf;
        int error;

        vat.va_blocksize = 0;
        if ((error = VOP_GETATTR(dvp, &vat)) != 0)
                return (error);
        cache_lock(nch);
        error = cache_vref(nch, cred, &pvp);
        cache_unlock(nch);
        if (error)
                return (error);
        if (ncvp_debug) {
                kprintf("inefficient_scan: directory iosize %ld "
                        "vattr fileid = %lld\n",
                        vat.va_blocksize,
                        (long long)vat.va_fileid);
        }

        /*
         * Use the supplied fakename if not NULL.  Fake names are typically
         * not in the actual filesystem hierarchy.  This is used by HAMMER
         * to glue @@timestamp recursions together.
         */
        if (fakename) {
                nlc.nlc_nameptr = fakename;
                nlc.nlc_namelen = strlen(fakename);
                rncp = cache_nlookup(nch, &nlc);
                goto done;
        }

        if ((blksize = vat.va_blocksize) == 0)
                blksize = DEV_BSIZE;
        rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
        rncp.ncp = NULL;

        eofflag = 0;
        uio.uio_offset = 0;
again:
        iov.iov_base = rbuf;
        iov.iov_len = blksize;
        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_resid = blksize;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_READ;
        uio.uio_td = curthread;

        if (ncvp_debug >= 2)
                kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
        error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
        if (error == 0) {
                den = (struct dirent *)rbuf;
                bytes = blksize - uio.uio_resid;

                while (bytes > 0) {
                        if (ncvp_debug >= 2) {
                                kprintf("cache_inefficient_scan: %*.*s\n",
                                        den->d_namlen, den->d_namlen,
                                        den->d_name);
                        }
                        if (den->d_type != DT_WHT &&
                            den->d_ino == vat.va_fileid) {
                                if (ncvp_debug) {
                                        kprintf("cache_inefficient_scan: "
                                                "MATCHED inode %lld path %s/%*.*s\n",
                                                (long long)vat.va_fileid,
                                                nch->ncp->nc_name,
                                                den->d_namlen, den->d_namlen,
                                                den->d_name);
                                }
                                nlc.nlc_nameptr = den->d_name;
                                nlc.nlc_namelen = den->d_namlen;
                                rncp = cache_nlookup(nch, &nlc);
                                KKASSERT(rncp.ncp != NULL);
                                break;
                        }
                        bytes -= _DIRENT_DIRSIZ(den);
                        den = _DIRENT_NEXT(den);
                }
                if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
                        goto again;
        }
        kfree(rbuf, M_TEMP);
done:
        vrele(pvp);
        if (rncp.ncp) {
                if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
                        _cache_setvp(rncp.mount, rncp.ncp, dvp);
                        if (ncvp_debug >= 2) {
                                kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
                                        nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
                        }
                } else {
                        if (ncvp_debug >= 2) {
                                kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
                                        nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
                                        rncp.ncp->nc_vp);
                        }
                }
                if (rncp.ncp->nc_vp == NULL)
                        error = rncp.ncp->nc_error;
                /*
                 * Release rncp after a successful nlookup.  rncp was fully
                 * referenced.
                 */
                cache_put(&rncp);
        } else {
                kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
                        dvp, nch->ncp->nc_name);
                error = ENOENT;
        }
        return (error);
}
/*
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or ncneglist.
 *
 * Then, if there are no additional references to the ncp and no children,
 * the ncp is removed from the topology and destroyed.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 *
 * If nonblock is non-zero and the parent ncp cannot be locked we give up.
 * This case can occur in the cache_drop() path.
 *
 * This function may return a held (but NOT locked) parent node which the
 * caller must drop.  We do this so _cache_drop() can loop, to avoid
 * blowing out the kernel stack.
 *
 * WARNING!  For MPSAFE operation this routine must acquire up to three
 *	     spin locks to be able to safely test nc_refs.  Lock order is
 *	     very important.
 *
 *	     hash spinlock if on hash list
 *	     parent spinlock if child of parent
 *	     (the ncp is unresolved so there is no vnode association)
 */
static struct namecache *
cache_zap(struct namecache *ncp, int nonblock)
{
        struct namecache *par;
        struct vnode *dropvp;
        int refs;

        /*
         * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
         */
        _cache_setunresolved(ncp);

        /*
         * Try to scrap the entry and possibly tail-recurse on its parent.
         * We only scrap unref'd (other than our ref) unresolved entries,
         * we do not scrap 'live' entries.
         *
         * Note that once the spinlocks are acquired if nc_refs == 1 no
         * other references are possible.  If it isn't, however, we have
         * to decrement but also be sure to avoid a 1->0 transition.
         */
        KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
        KKASSERT(ncp->nc_refs > 0);

        /*
         * Acquire locks.  Note that the parent can't go away while we hold
         * a child locked.
         */
        if ((par = ncp->nc_parent) != NULL) {
                if (nonblock) {
                        for (;;) {
                                if (_cache_lock_nonblock(par) == 0)
                                        break;
                                refs = ncp->nc_refs;
                                ncp->nc_flag |= NCF_DEFEREDZAP;
                                ++numdefered;	/* MP race ok */
                                if (atomic_cmpset_int(&ncp->nc_refs,
                                                      refs, refs - 1)) {
                                        _cache_unlock(ncp);
                                        return(NULL);
                                }
                        }
                        _cache_hold(par);
                } else {
                        _cache_hold(par);
                        _cache_lock(par);
                }
                spin_lock(&ncp->nc_head->spin);
        }

        /*
         * If someone other than us has a ref or we have children
         * we cannot zap the entry.  The 1->0 transition and any
         * further list operation is protected by the spinlocks
         * we have acquired but other transitions are not.
         */
        for (;;) {
                refs = ncp->nc_refs;
                if (refs == 1 && TAILQ_EMPTY(&ncp->nc_list))
                        break;
                if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) {
                        if (par) {
                                spin_unlock(&ncp->nc_head->spin);
                                _cache_put(par);
                        }
                        _cache_unlock(ncp);
                        return(NULL);
                }
        }

        /*
         * We are the only ref and with the spinlocks held no further
         * refs can be acquired by others.
         *
         * Remove us from the hash list and parent list.  We have to
         * drop a ref on the parent's vp if the parent's list becomes
         * empty.
         */
        dropvp = NULL;
        if (par) {
                struct nchash_head *nchpp = ncp->nc_head;

                KKASSERT(nchpp != NULL);
                LIST_REMOVE(ncp, nc_hash);
                TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
                if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
                        dropvp = par->nc_vp;
                ncp->nc_head = NULL;
                ncp->nc_parent = NULL;
                spin_unlock(&nchpp->spin);
                _cache_unlock(par);
        } else {
                KKASSERT(ncp->nc_head == NULL);
        }

        /*
         * ncp should not have picked up any refs.  Physically
         * destroy the ncp.
         */
        KKASSERT(ncp->nc_refs == 1);
        /* _cache_unlock(ncp) not required */
        ncp->nc_refs = -1;	/* safety */
        if (ncp->nc_name)
                kfree(ncp->nc_name, M_VFSCACHE);
        kfree(ncp, M_VFSCACHE);

        /*
         * Delayed drop (we had to release our spinlocks)
         *
         * The refed parent (if not NULL) must be dropped.  The
         * caller is responsible for looping.
         */
        if (dropvp)
                vdrop(dropvp);
        return(par);
}
/*
 * Clean up dangling negative cache and deferred-drop entries in the
 * hash table.
 */
typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t;

static cache_hs_t neg_cache_hysteresis_state = CHI_LOW;
static cache_hs_t pos_cache_hysteresis_state = CHI_LOW;
void
cache_hysteresis(void)
{
        int poslimit;

        /*
         * Don't cache too many negative hits.  We use hysteresis to reduce
         * the impact on the critical path.
         */
        switch(neg_cache_hysteresis_state) {
        case CHI_LOW:
                if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
                        _cache_cleanneg(10);
                        neg_cache_hysteresis_state = CHI_HIGH;
                }
                break;
        case CHI_HIGH:
                if (numneg > MINNEG * 9 / 10 &&
                    numneg * ncnegfactor * 9 / 10 > numcache
                ) {
                        _cache_cleanneg(10);
                } else {
                        neg_cache_hysteresis_state = CHI_LOW;
                }
                break;
        }

        /*
         * Don't cache too many positive hits.  We use hysteresis to reduce
         * the impact on the critical path.
         *
         * Excessive positive hits can accumulate due to large numbers of
         * hardlinks (the vnode cache will not prevent hl ncps from growing
         * into infinity).
         */
        if ((poslimit = ncposlimit) == 0)
                poslimit = desiredvnodes * 2;

        switch(pos_cache_hysteresis_state) {
        case CHI_LOW:
                if (numcache > poslimit && numcache > MINPOS) {
                        _cache_cleanpos(10);
                        pos_cache_hysteresis_state = CHI_HIGH;
                }
                break;
        case CHI_HIGH:
                if (numcache > poslimit * 5 / 6 && numcache > MINPOS) {
                        _cache_cleanpos(10);
                } else {
                        pos_cache_hysteresis_state = CHI_LOW;
                }
                break;
        }

        /*
         * Clean out dangling deferred-zap ncps which could not
         * be cleanly dropped if too many build up.  Note
         * that numdefered is not an exact number as such ncps
         * can be reused and the counter is not handled in a MP
         * safe manner by design.
         */
        if (numdefered * ncnegfactor > numcache) {
                _cache_cleandefered();
        }
}
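/*
 * Worked example (editor's addition), assuming the default ncnegfactor
 * of 16: in the CHI_LOW state negative-entry cleaning starts only once
 * numneg exceeds MINNEG and numneg * 16 > numcache.  Once triggered, the
 * CHI_HIGH state keeps cleaning until numneg falls below 9/10 of those
 * thresholds.  The gap between the two states is what prevents the
 * cleaner from flapping on and off around a single limit.
 */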
/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the namecache.  The passed par_nch must be referenced
 * and unlocked.  A referenced and locked nchandle with a non-NULL nch.ncp
 * is ALWAYS returned, even if the supplied component is illegal.
 *
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals (hence why the passed par_nch must be unlocked).  Locking
 * rules are to order for parent traversals, not for child traversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states:  positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
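/*
 * Illustrative sketch (editor's addition): typical use of this API for a
 * single path component.  "name", "namelen", "cred" and "par_nch" are
 * hypothetical:
 *
 *	struct nlcomponent nlc;
 *	struct nchandle nch;
 *
 *	nlc.nlc_nameptr = name;
 *	nlc.nlc_namelen = namelen;
 *	nch = cache_nlookup(&par_nch, &nlc);	// always locked, non-NULL ncp
 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		cache_resolve(&nch, cred);	// positive or negative hit
 *	// ... nch.ncp->nc_vp is NULL for a negative hit ...
 *	cache_put(&nch);
 */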
struct nchandle
cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
{
        struct nchandle nch;
        struct namecache *ncp;
        struct namecache *new_ncp;
        struct nchash_head *nchpp;
        struct mount *mp;
        u_int32_t hash;
        globaldata_t gd;
        int par_locked;

        numcalls++;
        gd = mycpu;
        mp = par_nch->mount;
        par_locked = 0;

        /*
         * This is a good time to call it, no ncp's are locked by
         * the caller or us.
         */
        cache_hysteresis();

        /*
         * Try to locate an existing entry
         */
        hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
        hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
        new_ncp = NULL;
        nchpp = NCHHASH(hash);
restart:
        spin_lock(&nchpp->spin);
        LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
                numchecks++;

                /*
                 * Break out if we find a matching entry.  Note that
                 * UNRESOLVED entries may match, but DESTROYED entries
                 * do not.
                 */
                if (ncp->nc_parent == par_nch->ncp &&
                    ncp->nc_nlen == nlc->nlc_namelen &&
                    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
                    (ncp->nc_flag & NCF_DESTROYED) == 0
                ) {
                        _cache_hold(ncp);
                        spin_unlock(&nchpp->spin);
                        if (par_locked) {
                                _cache_unlock(par_nch->ncp);
                                par_locked = 0;
                        }
                        if (_cache_lock_special(ncp) == 0) {
                                _cache_auto_unresolve(mp, ncp);
                                if (new_ncp)
                                        _cache_free(new_ncp);
                                goto found;
                        }
                        _cache_get(ncp);
                        _cache_put(ncp);
                        _cache_drop(ncp);
                        goto restart;
                }
        }

        /*
         * We failed to locate an entry, create a new entry and add it to
         * the cache.  The parent ncp must also be locked so we
         * can link into it.
         *
         * We have to relookup after possibly blocking in kmalloc or
         * when locking par_nch.
         *
         * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
         *	 mount case, in which case nc_name will be NULL.
         */
        if (new_ncp == NULL) {
                spin_unlock(&nchpp->spin);
                new_ncp = cache_alloc(nlc->nlc_namelen);
                if (nlc->nlc_namelen) {
                        bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
                              nlc->nlc_namelen);
                        new_ncp->nc_name[nlc->nlc_namelen] = 0;
                }
                goto restart;
        }
        if (par_locked == 0) {
                spin_unlock(&nchpp->spin);
                _cache_lock(par_nch->ncp);
                par_locked = 1;
                goto restart;
        }

        /*
         * WARNING!  We still hold the spinlock.  We have to set the hash
         *	     table entry atomically.
         */
        ncp = new_ncp;
        _cache_link_parent(ncp, par_nch->ncp, nchpp);
        spin_unlock(&nchpp->spin);
        _cache_unlock(par_nch->ncp);
        /* par_locked = 0 - not used */
found:
        /*
         * stats and namecache size management
         */
        if (ncp->nc_flag & NCF_UNRESOLVED)
                ++gd->gd_nchstats->ncs_miss;
        else if (ncp->nc_vp)
                ++gd->gd_nchstats->ncs_goodhits;
        else
                ++gd->gd_nchstats->ncs_neghits;
        nch.mount = mp;
        nch.ncp = ncp;
        atomic_add_int(&nch.mount->mnt_refs, 1);
        return(nch);
}
2378 * This is a non-blocking verison of cache_nlookup() used by
2379 * nfs_readdirplusrpc_uio(). It can fail for any reason and
2380 * will return nch.ncp == NULL in that case.
2383 cache_nlookup_nonblock(struct nchandle
*par_nch
, struct nlcomponent
*nlc
)
2385 struct nchandle nch
;
2386 struct namecache
*ncp
;
2387 struct namecache
*new_ncp
;
2388 struct nchash_head
*nchpp
;
2396 mp
= par_nch
->mount
;
2400 * Try to locate an existing entry
2402 hash
= fnv_32_buf(nlc
->nlc_nameptr
, nlc
->nlc_namelen
, FNV1_32_INIT
);
2403 hash
= fnv_32_buf(&par_nch
->ncp
, sizeof(par_nch
->ncp
), hash
);
2405 nchpp
= NCHHASH(hash
);
2407 spin_lock(&nchpp
->spin
);
2408 LIST_FOREACH(ncp
, &nchpp
->list
, nc_hash
) {
		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (par_locked) {
				_cache_unlock(par_nch->ncp);
				par_locked = 0;
			}
			if (_cache_lock_special(ncp) == 0) {
				_cache_auto_unresolve(mp, ncp);
				if (new_ncp)
					_cache_free(new_ncp);
				goto found;
			}
			_cache_drop(ncp);
			goto failed;
		}
	}
	/*
	 * We failed to locate an entry, create a new entry and add it to
	 * the cache.  The parent ncp must also be locked so we
	 * can link into it.
	 *
	 * We have to relookup after possibly blocking in kmalloc or
	 * when locking par_nch.
	 *
	 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
	 *	 mount case, in which case nc_name will be NULL.
	 */
	if (new_ncp == NULL) {
		spin_unlock(&nchpp->spin);
		new_ncp = cache_alloc(nlc->nlc_namelen);
		if (nlc->nlc_namelen) {
			bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
			      nlc->nlc_namelen);
			new_ncp->nc_name[nlc->nlc_namelen] = 0;
		}
		goto restart;
	}
	if (par_locked == 0) {
		spin_unlock(&nchpp->spin);
		if (_cache_lock_nonblock(par_nch->ncp) == 0) {
			par_locked = 1;
			goto restart;
		}
		goto failed;
	}
	/*
	 * WARNING!  We still hold the spinlock.  We have to set the hash
	 *	     table entry atomically.
	 */
	ncp = new_ncp;
	_cache_link_parent(ncp, par_nch->ncp, nchpp);
	spin_unlock(&nchpp->spin);
	_cache_unlock(par_nch->ncp);
	/* par_locked = 0 - not used */
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	nch.mount = mp;
	nch.ncp = ncp;
	atomic_add_int(&nch.mount->mnt_refs, 1);
	return(nch);
failed:
	if (new_ncp)
		_cache_free(new_ncp);
	nch.mount = NULL;
	nch.ncp = NULL;
	return(nch);
}
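
/*
 * Illustrative sketch (not part of this file, hypothetical caller): since
 * cache_nlookup_nonblock() may fail for any reason, callers treat
 * nch.ncp == NULL as "fall back to the blocking path", not as an error.
 */
#if 0
	nch = cache_nlookup_nonblock(&par_nch, &nlc);
	if (nch.ncp == NULL)
		nch = cache_nlookup(&par_nch, &nlc);	/* blocking fallback */
#endif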
/*
 * The namecache entry is marked as being used as a mount point.
 * Locate the mount if it is visible to the caller.
 */
struct findmount_info {
	struct mount *result;
	struct mount *nch_mount;
	struct namecache *nch_ncp;
};
static
int
cache_findmount_callback(struct mount *mp, void *data)
{
	struct findmount_info *info = data;

	/*
	 * Check the mount's mounted-on point against the passed nch.
	 */
	if (mp->mnt_ncmounton.mount == info->nch_mount &&
	    mp->mnt_ncmounton.ncp == info->nch_ncp
	) {
		info->result = mp;
		atomic_add_int(&mp->mnt_refs, 1);
		return(-1);
	}
	return(0);
}
struct mount *
cache_findmount(struct nchandle *nch)
{
	struct findmount_info info;

	info.result = NULL;
	info.nch_mount = nch->mount;
	info.nch_ncp = nch->ncp;
	mountlist_scan(cache_findmount_callback, &info,
		       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	return(info.result);
}
void
cache_dropmount(struct mount *mp)
{
	atomic_add_int(&mp->mnt_refs, -1);
}
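
/*
 * Illustrative sketch (not part of this file, hypothetical caller): a
 * successful cache_findmount() returns the mount with mnt_refs bumped,
 * so it must always be paired with cache_dropmount().
 */
#if 0
	struct mount *mp;

	if ((mp = cache_findmount(&nch)) != NULL) {
		/* ... inspect the mount sitting on this nch ... */
		cache_dropmount(mp);	/* drop the ref the scan took */
	}
#endif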
/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * However, if a vnode was in the middle of being recycled when the NCP
 * got locked, ncp->nc_vp might point to a vnode that is about to become
 * invalid.  cache_resolve() handles this case by unresolving the entry
 * and then re-resolving it.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * is returned.
 */
int
cache_resolve(struct nchandle *nch, struct ucred *cred)
{
	struct namecache *par_tmp;
	struct namecache *par;
	struct namecache *ncp;
	struct nchandle nctmp;
	struct mount *mp;
	struct vnode *dvp;
	int error;

	ncp = nch->ncp;
	mp = nch->mount;
restart:
	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
			return (ncp->nc_error);
	}
	/*
	 * If the ncp was destroyed it will never resolve again.  This
	 * can basically only happen when someone is chdir'd into an
	 * empty directory which is then rmdir'd.  We want to catch this
	 * here and not dive the VFS because the VFS might actually
	 * have a way to re-resolve the disconnected ncp, which will
	 * result in inconsistencies in the cdir/nch for proc->p_fd.
	 */
	if (ncp->nc_flag & NCF_DESTROYED) {
		kprintf("Warning: cache_resolve: ncp '%s' was unlinked\n",
			ncp->nc_name);
		return(EINVAL);
	}
	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp == mp->mnt_ncmountpt.ncp)
		return (cache_resolve_mp(mp));
	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		kprintf("EXDEV case 1 %p %*.*s\n", ncp,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		return(ncp->nc_error);
	}
	/*
	 * The vp's of the parent directories in the chain are held via vhold()
	 * due to the existence of the child, and should not disappear.
	 * However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace and
	 *	  destroying the namespace for entire directories quite often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur all that often, or if it does not have to go back too
	 * many nodes to resolve the ncp.
	 */
	while ((dvp = cache_dvpref(ncp)) == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(EINVAL);
		par = ncp->nc_parent;
		_cache_hold(par);
		_cache_lock(par);
		while ((par_tmp = par->nc_parent) != NULL &&
		       par_tmp->nc_vp == NULL) {
			_cache_hold(par_tmp);
			_cache_lock(par_tmp);
			_cache_put(par);
			par = par_tmp;
		}
		if (par->nc_parent == NULL) {
			kprintf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			return (EXDEV);
		}
		kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
			par->nc_nlen, par->nc_nlen, par->nc_name);
		/*
		 * The parent is not set in stone, ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
		 */
		_cache_get(par);	/* additional hold/lock */
		_cache_put(par);	/* from earlier hold/lock */
		if (par == nch->mount->mnt_ncmountpt.ncp) {
			cache_resolve_mp(nch->mount);
		} else if ((dvp = cache_dvpref(par)) == NULL) {
			kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			continue;
		} else {
			if (par->nc_flag & NCF_UNRESOLVED) {
				nctmp.mount = mp;
				nctmp.ncp = par;
				par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
			}
			vrele(dvp);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				kprintf("EXDEV case 3 %*.*s error %d\n",
					par->nc_nlen, par->nc_nlen,
					par->nc_name, par->nc_error);
				_cache_put(par);
				return(error);
			}
			kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
				par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		_cache_put(par);
		/* loop */
	}
	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 *	 ncp must already be resolved.
	 */
	if (dvp) {
		nctmp.mount = mp;
		nctmp.ncp = ncp;
		ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
		vrele(dvp);
	} else {
		ncp->nc_error = EPERM;
	}
	if (ncp->nc_error == EAGAIN) {
		kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		goto restart;
	}
	return(ncp->nc_error);
}
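
/*
 * Illustrative sketch (not part of this file, hypothetical caller): a
 * negative cache hit is a successful resolution that returns ENOENT, so
 * callers cannot treat every non-zero return from cache_resolve() as a
 * failure to resolve.
 */
#if 0
	error = cache_resolve(&nch, cred);
	if (error == 0) {
		/* positive hit: nch.ncp->nc_vp is usable */
	} else if (error == ENOENT) {
		/* negative hit: the name is known not to exist */
	} else {
		/* hard failure, e.g. EXDEV from a disconnected chain */
	}
#endif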
/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS mount points tend
 * to force re-resolution more often due to their mack-truck-smash-the-
 * namecache method of tracking namespace changes.
 *
 * The semantics for this call are that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct mount *mp)
{
	struct namecache *ncp = mp->mnt_ncmountpt.ncp;
	struct vnode *vp;
	int error;

	KKASSERT(mp != NULL);
	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp);
	}
	if (ncp->nc_flag & NCF_UNRESOLVED) {
		_cache_unlock(ncp);
		while (vfs_busy(mp, 0))
			;
		error = VFS_ROOT(mp, &vp);
		_cache_lock(ncp);

		/*
		 * recheck the ncp state after relocking.
		 */
		if (ncp->nc_flag & NCF_UNRESOLVED) {
			ncp->nc_error = error;
			if (error == 0) {
				_cache_setvp(mp, ncp, vp);
				vput(vp);
			} else {
				kprintf("[diagnostic] cache_resolve_mp: failed"
					" to resolve mount %p err=%d ncp=%p\n",
					mp, error, ncp);
				_cache_setvp(mp, ncp, NULL);
			}
		} else if (error == 0) {
			vput(vp);
		}
		vfs_unbusy(mp);
	}
	return(ncp->nc_error);
}
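
/*
 * Illustrative sketch (not part of this file): the unlock/relock dance in
 * cache_resolve_mp() is the generic "drop lock, block, relock, recheck"
 * pattern.  A pthreads rendition with hypothetical names; the caller is
 * assumed to hold obj->lock on entry and exit:
 */
#if 0
#include <pthread.h>

#define NEEDS_RESOURCE	0x01		/* hypothetical flag */

struct obj {
	pthread_mutex_t lock;
	int flags;
	void *res;
};

extern void *acquire_resource(void);	/* hypothetical, may block */
extern void release_resource(void *);

static void
resolve(struct obj *obj)
{
	void *res;

	pthread_mutex_unlock(&obj->lock);	/* cannot block while locked */
	res = acquire_resource();
	pthread_mutex_lock(&obj->lock);
	if (obj->flags & NEEDS_RESOURCE) {
		/* still unresolved: our result wins */
		obj->flags &= ~NEEDS_RESOURCE;
		obj->res = res;
	} else {
		/* someone resolved it while we slept: discard ours */
		release_resource(res);
	}
}
#endif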
/*
 * Clean out negative cache entries when too many have accumulated.
 */
static void
_cache_cleanneg(int count)
{
	struct namecache *ncp;

	/*
	 * Attempt to clean out the specified number of negative cache
	 * entries.
	 */
	while (count) {
		spin_lock(&ncspin);
		ncp = TAILQ_FIRST(&ncneglist);
		if (ncp == NULL) {
			spin_unlock(&ncspin);
			break;
		}
		TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		_cache_hold(ncp);
		spin_unlock(&ncspin);
		if (_cache_lock_special(ncp) == 0) {
			ncp = cache_zap(ncp, 1);
			if (ncp)
				_cache_drop(ncp);
		} else {
			_cache_drop(ncp);
		}
		--count;
	}
}
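
/*
 * Illustrative sketch (not part of this file): the TAILQ_REMOVE plus
 * TAILQ_INSERT_TAIL pair above rotates the list head to the tail, so
 * repeated cleaning passes cycle fairly through all entries even when
 * some of them cannot be locked.  Self-contained with <sys/queue.h>:
 */
#if 0
#include <sys/queue.h>
#include <stddef.h>

struct entry {
	TAILQ_ENTRY(entry) link;
};
static TAILQ_HEAD(, entry) list = TAILQ_HEAD_INITIALIZER(list);

static struct entry *
rotate_candidate(void)
{
	struct entry *e;

	if ((e = TAILQ_FIRST(&list)) != NULL) {
		TAILQ_REMOVE(&list, e, link);
		TAILQ_INSERT_TAIL(&list, e, link);
	}
	return (e);	/* next call starts at the following entry */
}
#endif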
/*
 * Clean out positive cache entries when too many have accumulated.
 */
static void
_cache_cleanpos(int count)
{
	static volatile int rover;
	struct nchash_head *nchpp;
	struct namecache *ncp;
	int rover_copy;

	/*
	 * Attempt to clean out the specified number of positive cache
	 * entries.
	 */
	while (count) {
		rover_copy = ++rover;	/* MPSAFEENOUGH */
		nchpp = NCHHASH(rover_copy);

		spin_lock(&nchpp->spin);
		ncp = LIST_FIRST(&nchpp->list);
		if (ncp)
			_cache_hold(ncp);
		spin_unlock(&nchpp->spin);

		if (ncp) {
			if (_cache_lock_special(ncp) == 0) {
				ncp = cache_zap(ncp, 1);
				if (ncp)
					_cache_drop(ncp);
			} else {
				_cache_drop(ncp);
			}
		}
		--count;
	}
}
/*
 * This is a kitchen sink function to clean out ncps which we
 * tried to zap from cache_drop() but failed because we were
 * unable to acquire the parent lock.
 *
 * Such entries can also be removed via cache_inval_vp(), such
 * as when unmounting.
 */
static void
_cache_cleandefered(void)
{
	struct nchash_head *nchpp;
	struct namecache *ncp;
	struct namecache dummy;
	int i;

	bzero(&dummy, sizeof(dummy));
	dummy.nc_flag = NCF_DESTROYED;

	for (i = 0; i <= nchash; ++i) {
		nchpp = &nchashtbl[i];

		spin_lock(&nchpp->spin);
		LIST_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
		ncp = &dummy;
		while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) {
			if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
				continue;
			LIST_REMOVE(&dummy, nc_hash);
			LIST_INSERT_AFTER(ncp, &dummy, nc_hash);
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
				_cache_put(ncp);
			} else {
				_cache_drop(ncp);
			}
			spin_lock(&nchpp->spin);
			ncp = &dummy;
		}
		LIST_REMOVE(&dummy, nc_hash);
		spin_unlock(&nchpp->spin);
	}
}
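
/*
 * Illustrative sketch (not part of this file): the DESTROYED dummy above
 * is a marker node recording the scan position in the hash chain, so the
 * chain lock can be dropped while an entry is processed and the scan can
 * resume from the marker afterwards.  A minimal single-threaded rendition
 * with hypothetical flag names:
 */
#if 0
#include <sys/queue.h>

#define NF_WORK	0x01			/* hypothetical work-pending flag */

struct node {
	int flags;
	LIST_ENTRY(node) link;
};
static LIST_HEAD(, node) chain = LIST_HEAD_INITIALIZER(chain);
static struct node marker;		/* placeholder, never processed */

static void
scan(void)
{
	struct node *n = &marker;

	LIST_INSERT_HEAD(&chain, &marker, link);
	while ((n = LIST_NEXT(n, link)) != NULL) {
		if ((n->flags & NF_WORK) == 0)
			continue;
		/* park the marker after n; a lock could be dropped here */
		LIST_REMOVE(&marker, link);
		LIST_INSERT_AFTER(n, &marker, link);
		n->flags &= ~NF_WORK;	/* "process" n */
		n = &marker;		/* resume scan from the marker */
	}
	LIST_REMOVE(&marker, link);
}
#endif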
/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
	int i;
	globaldata_t gd;

	/* initialise per-cpu namecache effectiveness statistics. */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}
	TAILQ_INIT(&ncneglist);
	spin_init(&ncspin);
	nchashtbl = hashinit_ext(desiredvnodes / 2,
				 sizeof(struct nchash_head),
				 M_VFSCACHE, &nchash);
	for (i = 0; i <= (int)nchash; ++i) {
		LIST_INIT(&nchashtbl[i].list);
		spin_init(&nchashtbl[i].spin);
	}
	nclockwarn = 5 * hz;
}
/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record.
 */
void
cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
{
	nch->ncp = cache_alloc(0);
	nch->mount = mp;
	atomic_add_int(&mp->mnt_refs, 1);
	if (vp)
		_cache_setvp(nch->mount, nch->ncp, vp);
}
/*
 * vfs_cache_setroot()
 *
 *	Create an association between the root of our namecache and
 *	the root vnode.  This routine may be called several times during
 *	booting.
 *
 *	If the caller intends to save the returned namecache pointer somewhere
 *	it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
{
	struct vnode *ovp;
	struct nchandle onch;

	ovp = rootvnode;
	onch = rootnch;
	rootvnode = nvp;
	if (nch)
		rootnch = *nch;
	else
		cache_zero(&rootnch);
	if (ovp)
		vrele(ovp);
	if (onch.ncp)
		cache_drop(&onch);
}
/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.  The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}
#if 0
/*
 * Flush all entries referencing a particular filesystem.
 *
 * Since we need to check it anyway, we will flush all the invalid
 * entries at the same time.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct nchash_head *nchpp;
	struct namecache *ncp, *nnp;

	/*
	 * Scan hash tables for applicable entries.
	 */
	for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
		spin_lock_wr(&nchpp->spin); XXX
		ncp = LIST_FIRST(&nchpp->list);
		if (ncp)
			_cache_hold(ncp);
		while (ncp) {
			nnp = LIST_NEXT(ncp, nc_hash);
			if (nnp)
				_cache_hold(nnp);
			if (ncp->nc_mount == mp) {
				_cache_lock(ncp);
				ncp = cache_zap(ncp, 0);
				if (ncp)
					_cache_drop(ncp);
			} else {
				_cache_drop(ncp);
			}
			ncp = nnp;
		}
		spin_unlock_wr(&nchpp->spin); XXX
	}
}
#endif
static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable getcwd");
static u_long numcwdcalls;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdcalls, CTLFLAG_RD, &numcwdcalls, 0,
    "Number of current directory resolution calls");
static u_long numcwdfailnf;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailnf, CTLFLAG_RD, &numcwdfailnf, 0,
    "Number of current directory failures due to lack of file");
static u_long numcwdfailsz;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfailsz, CTLFLAG_RD, &numcwdfailsz, 0,
    "Number of current directory failures due to large result");
static u_long numcwdfound;
SYSCTL_ULONG(_vfs_cache, OID_AUTO, numcwdfound, CTLFLAG_RD, &numcwdfound, 0,
    "Number of current directory resolution successes");
int
sys___getcwd(struct __getcwd_args *uap)
{
	u_int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen == 0)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, buflen, &error);
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	kfree(buf, M_TEMP);
	return (error);
}
char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct nchandle nch;
	struct namecache *ncp;

	numcwdcalls++;
	bp = buf;
	bp += buflen - 1;
	*bp = '\0';
	fdp = p->p_fd;
	slash_prefixed = 0;

	nch = fdp->fd_ncdir;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);

	while (ncp && (ncp != fdp->fd_nrdir.ncp ||
	       nch.mount != fdp->fd_nrdir.mount)
	) {
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point
		 * in the underlying filesystem.
		 */
		if (ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numcwdfailsz++;
				*error = ERANGE;
				bp = NULL;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numcwdfailsz++;
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			_cache_lock(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}
	if (ncp == NULL) {
		numcwdfailnf++;
		*error = ENOENT;
		bp = NULL;
		goto done;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numcwdfailsz++;
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
	}
	numcwdfound++;
	*error = 0;
done:
	if (ncp)
		_cache_drop(ncp);
	return (bp);
}
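
/*
 * Illustrative sketch (not part of this file): kern_getcwd() assembles
 * the path backwards, starting at the end of the buffer and prepending
 * one component at a time, so the result never has to be shifted.
 * Self-contained user-space rendition:
 */
#if 0
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *comps[] = { "usr", "local", "bin" };  /* root first */
	char buf[64];
	char *bp = buf + sizeof(buf) - 1;
	int i, j;

	*bp = '\0';
	/* walk leaf -> root, prepending "name" then "/" each time */
	for (i = 2; i >= 0; --i) {
		for (j = strlen(comps[i]) - 1; j >= 0; --j)
			*--bp = comps[i][j];
		*--bp = '/';
	}
	printf("%s\n", bp);	/* prints /usr/local/bin */
	return (0);
}
#endif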
/*
 * Thus begins the fullpath magic.
 *
 * The passed nchp is referenced but not locked.
 */
static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
	   &disablefullpath, 0,
	   "Disable fullpath lookups");
static u_int numfullpathcalls;
SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathcalls, CTLFLAG_RD,
	    &numfullpathcalls, 0,
	    "Number of full path resolutions in progress");
static u_int numfullpathfailnf;
SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathfailnf, CTLFLAG_RD,
	    &numfullpathfailnf, 0,
	    "Number of full path resolution failures due to lack of file");
static u_int numfullpathfailsz;
SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathfailsz, CTLFLAG_RD,
	    &numfullpathfailsz, 0,
	    "Number of full path resolution failures due to insufficient memory");
static u_int numfullpathfound;
SYSCTL_UINT(_vfs_cache, OID_AUTO, numfullpathfound, CTLFLAG_RD,
	    &numfullpathfound, 0,
	    "Number of full path resolution successes");
int
cache_fullpath(struct proc *p, struct nchandle *nchp,
	       char **retbuf, char **freebuf, int guess)
{
	struct nchandle fd_nrdir;
	struct nchandle nch;
	struct namecache *ncp;
	struct mount *mp, *new_mp;
	char *bp;
	char *buf;
	int slash_prefixed;
	int error = 0;
	int i;

	atomic_add_int(&numfullpathcalls, -1);

	*retbuf = NULL;
	*freebuf = NULL;

	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	if (p != NULL)
		fd_nrdir = p->p_fd->fd_nrdir;
	else
		fd_nrdir = rootnch;
	slash_prefixed = 0;
	nch = *nchp;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);
	mp = nch.mount;

	while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
		new_mp = NULL;

		/*
		 * If we are asked to guess the upwards path, we do so whenever
		 * we encounter an ncp marked as a mountpoint.  We try to find
		 * the actual mountpoint by finding the mountpoint with this
		 * ncp.
		 */
		if (guess && (ncp->nc_flag & NCF_ISMOUNTPT))
			new_mp = mount_get_by_nc(ncp);

		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point.
		 */
		if (ncp == mp->mnt_ncmountpt.ncp)
			new_mp = mp;
		if (new_mp) {
			nch = new_mp->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			mp = nch.mount;
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				numfullpathfailsz++;
				kfree(buf, M_TEMP);
				error = ENOMEM;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			numfullpathfailsz++;
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 *
		 * We can only safely access nc_parent with ncp held locked.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			_cache_lock(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}
	if (ncp == NULL) {
		numfullpathfailnf++;
		error = ENOENT;
		kfree(buf, M_TEMP);
		goto done;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			numfullpathfailsz++;
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
	}
	numfullpathfound++;
	*retbuf = bp;
	*freebuf = buf;
done:
	if (ncp)
		_cache_drop(ncp);
	return(error);
}
int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf,
	    int guess)
{
	struct namecache *ncp;
	struct nchandle nch;
	int error;

	atomic_add_int(&numfullpathcalls, 1);
	if (disablefullpath)
		return (ENODEV);
	if (p == NULL)
		return (EINVAL);

	/* vn is NULL, client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	spin_lock(&vn->v_spin);
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL) {
		spin_unlock(&vn->v_spin);
		return (EINVAL);
	}
	_cache_hold(ncp);
	spin_unlock(&vn->v_spin);

	atomic_add_int(&numfullpathcalls, -1);
	nch.ncp = ncp;
	nch.mount = vn->v_mount;
	error = cache_fullpath(p, &nch, retbuf, freebuf, guess);
	_cache_drop(ncp);
	return (error);
}
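
/*
 * Illustrative sketch (not part of this file, hypothetical caller): on
 * success retbuf points into the buffer returned via freebuf, so the
 * caller frees freebuf (not retbuf) once it is done with the path.
 */
#if 0
	char *path, *freepath;

	if (vn_fullpath(p, vp, &path, &freepath, 0) == 0) {
		kprintf("vnode path: %s\n", path);
		kfree(freepath, M_TEMP);	/* path is invalid after this */
	}
#endif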