/*
 * Copyright (c) 2003-2020 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysmsg.h>
#include <sys/spinlock.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <sys/spinlock2.h>

#define MAX_RECURSION_DEPTH	64
/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).  Each hash chain has its own spin lock,
 * but we use the ncp->update counter trick to avoid acquiring any
 * contestable spin-locks during a lookup.
 *
 * Negative entries may exist and correspond to resolved namecache
 * structures where nc_vp is NULL.  In a negative entry, NCF_WHITEOUT
 * will be set if the entry corresponds to a whited-out directory entry
 * (versus simply not finding the entry at all).  pcpu_ncache[n].neg_list
 * is locked via pcpu_ncache[n].neg_spin.
 */
/*
 * (1) ncp's typically have at least a nc_refs of 1, and usually 2.  One
 *     is applicable to direct lookups via the hash table nchpp or via
 *     nc_list (the two are added or removed together).  Removal of the ncp
 *     from the hash table drops this reference.  The second is applicable
 *     to vp->v_namecache linkages (or negative list linkages), and removal
 *     of the ncp from these lists drops this reference.
 *
 *     On the 1->0 transition of nc_refs the ncp can no longer be referenced
 *     and must be destroyed.  No other thread should have access to it at
 *     this point so it can be safely locked and freed without any deadlock
 *     concerns.
 *
 *     The 1->0 transition can occur at almost any juncture and so
 *     cache_drop() deals with it directly.
 *
 * (2) Once the 1->0 transition occurs, the entity that caused the transition
 *     will be responsible for destroying the ncp.  The ncp cannot be on any
 *     list or hash at this time, or be held by anyone other than the caller
 *     responsible for the transition.
 *
 * (3) A ncp must be locked in order to modify it.
 *
 * (5) ncp locks are ordered, child-to-parent.  Child first, then parent.
 *     This may seem backwards but forward-scans use the hash table and thus
 *     can hold the parent unlocked while traversing downward.  Deletions,
 *     on the other hand, tend to propagate bottom-up since the ref on the
 *     parent is dropped as the children go away.
 *
 * (6) Both parent and child must be locked in order to enter the child onto
 *     the parent's nc_list.
 */
/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)		(&nchashtbl[(hash) & nchash])
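/*
 * Illustrative sketch (not part of the original file): a hash chain is
 * selected by folding the component name and then the parent pointer into
 * an FNV-1 hash, exactly as cache_rename() does further below:
 *
 *	hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&par, sizeof(par), hash);
 *	nchpp = NCHHASH(hash);
 */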
#define NCMOUNT_NUMCACHE	(16384)	/* power of 2 */
#define NCMOUNT_SET		(8)	/* power of 2 */

MALLOC_DEFINE_OBJ(M_VFSCACHE, sizeof(struct namecache),
		  "namecache", "namecache entries");
MALLOC_DEFINE(M_VFSCACHEAUX, "namecachestr", "namecache strings");

TAILQ_HEAD(nchash_list, namecache);
/*
 * Don't cachealign, but at least pad to 32 bytes so entries
 * don't cross a cache line.
 */
struct nchash_head {
	struct nchash_list list;	/* 16 bytes */
	struct spinlock	spin;		/* 8 bytes */
	long	pad01;			/* 8 bytes */
};

struct ncmount_cache {
	struct spinlock	spin;
	struct namecache *ncp;
	struct mount	*mp;
	struct mount	*mp_target;
};

struct pcpu_ncache {
	struct spinlock	umount_spin;	/* cache_findmount/interlock */
	struct spinlock	neg_spin;	/* for neg_list and neg_count */
	struct namecache_list	neg_list;
	long	neg_count;
	long	vfscache_negs;
	long	vfscache_count;
	long	vfscache_leafs;
	long	vfscache_unres;
	long	inv_kid_quick_count;
	long	inv_ncp_quick_count;
	long	clean_pos_count;
	long	clean_neg_count;
} __cachealign;
__read_mostly static struct nchash_head	*nchashtbl;
__read_mostly static struct pcpu_ncache	*pcpu_ncache;
static struct ncmount_cache	ncmount_cache[NCMOUNT_NUMCACHE];
/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 *	0	Only errors are reported
 *	1	Successes are reported
 *	2	Successes + the whole directory scan is reported
 *	3	Force the directory scan code run as if the parent vnode
 *		did not have a namecache record, even if it does have one.
 */
__read_mostly int ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0,
    "Namecache debug level (0-3)");

__read_mostly static u_long nchash;		/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0,
    "Size of namecache hash table");

__read_mostly static int ncnegflush = 10;	/* burst for negative flush */
SYSCTL_INT(_debug, OID_AUTO, ncnegflush, CTLFLAG_RW, &ncnegflush, 0,
    "Batch flush negative entries");

__read_mostly static int ncposflush = 10;	/* burst for positive flush */
SYSCTL_INT(_debug, OID_AUTO, ncposflush, CTLFLAG_RW, &ncposflush, 0,
    "Batch flush positive entries");

__read_mostly static int ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0,
    "Ratio of negative namecache entries");

__read_mostly static int ncposfactor = 16;  /* ratio of unres+leaf entries */
SYSCTL_INT(_debug, OID_AUTO, ncposfactor, CTLFLAG_RW, &ncposfactor, 0,
    "Ratio of unresolved leaf namecache entries");

__read_mostly static int nclockwarn;	/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0,
    "Warn on locked namecache entries in ticks");

__read_mostly static int ncposlimit;	/* number of cache entries allocated */
SYSCTL_INT(_debug, OID_AUTO, ncposlimit, CTLFLAG_RW, &ncposlimit, 0,
    "Number of cache entries allocated");

__read_mostly static int ncp_shared_lock_disable = 0;
SYSCTL_INT(_debug, OID_AUTO, ncp_shared_lock_disable, CTLFLAG_RW,
    &ncp_shared_lock_disable, 0, "Disable shared namecache locks");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode),
    "sizeof(struct vnode)");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache),
    "sizeof(struct namecache)");

__read_mostly static int ncmount_cache_enable = 1;
SYSCTL_INT(_debug, OID_AUTO, ncmount_cache_enable, CTLFLAG_RW,
    &ncmount_cache_enable, 0, "mount point cache");
static __inline void _cache_drop(struct namecache *ncp);
static int cache_resolve_mp(struct mount *mp, int adjgen);
static int cache_findmount_callback(struct mount *mp, void *data);
static void _cache_setunresolved(struct namecache *ncp, int adjgen);
static void _cache_cleanneg(long count);
static void _cache_cleanpos(long ucount, long xcount);
static void _cache_cleandefered(void);
static void _cache_unlink(struct namecache *ncp);
/*
 * The new name cache statistics (these are rolled up globals and not
 * modified in the critical path, see struct pcpu_ncache).
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
static long vfscache_negs;
SYSCTL_LONG(_vfs_cache, OID_AUTO, numneg, CTLFLAG_RD, &vfscache_negs, 0,
    "Number of negative namecache entries");
static long vfscache_count;
SYSCTL_LONG(_vfs_cache, OID_AUTO, numcache, CTLFLAG_RD, &vfscache_count, 0,
    "Number of namecache entries");
static long vfscache_leafs;
SYSCTL_LONG(_vfs_cache, OID_AUTO, numleafs, CTLFLAG_RD, &vfscache_leafs, 0,
    "Number of leaf namecache entries");
static long vfscache_unres;
SYSCTL_LONG(_vfs_cache, OID_AUTO, numunres, CTLFLAG_RD, &vfscache_unres, 0,
    "Number of unresolved leaf namecache entries");

static long inv_kid_quick_count;
SYSCTL_LONG(_vfs_cache, OID_AUTO, inv_kid_quick_count, CTLFLAG_RD,
    &inv_kid_quick_count, 0,
    "quick kid invalidations");
static long inv_ncp_quick_count;
SYSCTL_LONG(_vfs_cache, OID_AUTO, inv_ncp_quick_count, CTLFLAG_RD,
    &inv_ncp_quick_count, 0,
    "quick ncp invalidations");
static long clean_pos_count;
SYSCTL_LONG(_vfs_cache, OID_AUTO, clean_pos_count, CTLFLAG_RD,
    &clean_pos_count, 0,
    "positive ncp cleanings");
static long clean_neg_count;
SYSCTL_LONG(_vfs_cache, OID_AUTO, clean_neg_count, CTLFLAG_RD,
    &clean_neg_count, 0,
    "negative ncp cleanings");

static long numdefered;
SYSCTL_LONG(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0,
    "Number of deferred zaps");
/*
 * Returns the number of basic references expected on the ncp, not
 * including any children.  1 for the natural ref, and an additional ref
 * if the ncp is resolved (representing a positive or negative hit).
 */
static __inline int
ncpbaserefs(struct namecache *ncp)
{
	return (1 + ((ncp->nc_flag & NCF_UNRESOLVED) == 0));
}
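/*
 * Illustrative sketch (not part of the original file): callers stack
 * their own temporary refs on top of this base count when checking for
 * unexpected references; cache_inval_vp_quick() below tests
 *
 *	kid->nc_refs != 1 + ncpbaserefs(kid)
 *
 * i.e. the natural ref, the resolve-related ref (if resolved), plus the
 * single ref the caller itself is holding.
 */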
struct nchstats nchstats[SMP_MAXCPU];

/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distributions.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i;
	int error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
					sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
    0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
static int cache_zap(struct namecache *ncp);
/*
 * Cache mount points and namecache records in order to avoid unnecessary
 * atomic ops on mnt_refs and ncp->refs.  This improves concurrent SMP
 * performance and is particularly important on multi-socket systems to
 * reduce cache-line ping-ponging.
 *
 * Try to keep the pcpu structure within one cache line (~64 bytes).
 */
#define MNTCACHE_COUNT	32	/* power of 2, multiple of SET */
#define MNTCACHE_SET	8	/* set associativity */

struct mntcache_elm {
	struct namecache *ncp;
	struct mount	 *mp;
	int	ticks;
	int	unused01;
};

struct mntcache {
	struct mntcache_elm array[MNTCACHE_COUNT];
} __cachealign;

static struct mntcache	pcpu_mntcache[MAXCPU];
static __inline void
_cache_ncp_gen_enter(struct namecache *ncp)
{
	ncp->nc_generation += 2;
}

static __inline void
_cache_ncp_gen_exit(struct namecache *ncp)
{
	ncp->nc_generation += 2;
}
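/*
 * Illustrative sketch (not part of the original file): because each
 * modification bumps nc_generation by 2 on entry and again by 2 on exit,
 * bit 1 is set exactly while an update is in flight.  A lockless reader
 * can therefore sample the counter around its copy and retry on
 * instability, along the lines of:
 *
 *	gen = ncp->nc_generation;
 *	cpu_lfence();
 *	... copy the fields of interest ...
 *	cpu_lfence();
 *	if ((gen & 2) || gen != ncp->nc_generation)
 *		goto retry;
 */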
static __inline struct mntcache_elm *
_cache_mntcache_hash(void *ptr)
{
	struct mntcache_elm *elm;
	int hv;

	hv = iscsi_crc32(&ptr, sizeof(ptr)) & (MNTCACHE_COUNT - 1);
	elm = &pcpu_mntcache[mycpu->gd_cpuid].array[hv & ~(MNTCACHE_SET - 1)];

	return elm;
}
static __inline void
_cache_mntref(struct mount *mp)
{
	struct mntcache_elm *elm;
	struct mount *mpr;
	int i;

	elm = _cache_mntcache_hash(mp);
	for (i = 0; i < MNTCACHE_SET; ++i) {
		if (elm->mp == mp) {
			mpr = atomic_swap_ptr((void *)&elm->mp, NULL);
			if (__predict_true(mpr == mp))
				return;
			if (mpr)
				atomic_add_int(&mpr->mnt_refs, -1);
		}
		++elm;
	}
	atomic_add_int(&mp->mnt_refs, 1);
}
static __inline void
_cache_mntrel(struct mount *mp)
{
	struct mntcache_elm *elm;
	struct mntcache_elm *best;
	struct mount *mpr;
	int delta1;
	int delta2;
	int i;

	elm = _cache_mntcache_hash(mp);
	best = elm;
	for (i = 0; i < MNTCACHE_SET; ++i) {
		if (elm->mp == NULL) {
			mpr = atomic_swap_ptr((void *)&elm->mp, mp);
			if (__predict_false(mpr != NULL)) {
				atomic_add_int(&mpr->mnt_refs, -1);
			}
			elm->ticks = ticks;
			return;
		}
		delta1 = ticks - best->ticks;
		delta2 = ticks - elm->ticks;
		if (delta2 > delta1 || delta1 < -1 || delta2 < -1)
			best = elm;
		++elm;
	}
	mpr = atomic_swap_ptr((void *)&best->mp, mp);
	best->ticks = ticks;
	if (mpr)
		atomic_add_int(&mpr->mnt_refs, -1);
}
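/*
 * Illustrative sketch (not part of the original file): a release/ref pair
 * on the same cpu typically never touches mp->mnt_refs at all:
 *
 *	_cache_mntrel(mp);	parks the ref in pcpu_mntcache[]
 *	...
 *	_cache_mntref(mp);	steals the parked ref back
 *
 * Only when the set is full, or when the parked ref is consumed on
 * another cpu, do the atomic fallback paths run.
 */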
/*
 * Clears all cached mount points on all cpus.  This routine should only
 * be called when we are waiting for a mount to clear, e.g. so stale
 * cached refs do not keep it from going away.
 */
void
cache_clearmntcache(struct mount *target __unused)
{
	int n;

	for (n = 0; n < ncpus; ++n) {
		struct mntcache *cache = &pcpu_mntcache[n];
		struct mntcache_elm *elm;
		struct namecache *ncp;
		struct mount *mp;
		int i;

		for (i = 0; i < MNTCACHE_COUNT; ++i) {
			elm = &cache->array[i];
			if (elm->mp) {
				mp = atomic_swap_ptr((void *)&elm->mp, NULL);
				if (mp)
					atomic_add_int(&mp->mnt_refs, -1);
			}
			if (elm->ncp) {
				ncp = atomic_swap_ptr((void *)&elm->ncp, NULL);
				if (ncp)
					_cache_drop(ncp);
			}
		}
	}
}
/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  The controlling entity
 * in a 1->0 transition does not need to lock the ncp to dispose of it,
 * as nobody else will have visibility to it at that point.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * An exclusive lock owner has full authority to associate/disassociate
 * vnodes and resolve/unresolve the locked ncp.
 *
 * A shared lock owner only has authority to acquire the underlying vnode,
 * if any.
 *
 * The primary lock field is nc_lockstatus.  nc_locktd is set after the
 * fact (when locking) or cleared prior to unlocking.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 *	     or recycled, but it does NOT help you if the vnode had already
 *	     initiated a recyclement.  If this is important, use cache_get()
 *	     rather than cache_lock() (and deal with the differences in the
 *	     way the refs counter is handled).  Or, alternatively, make an
 *	     unconditional call to cache_validate() or cache_resolve()
 *	     after cache_lock() returns.
 */
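/*
 * Illustrative sketch (not part of the original file): per the WARNING
 * above, a caller that needs a definitively usable vnode either uses the
 * get/put API:
 *
 *	cache_get(&nch, &lnch);		ref + lock, revalidates vp
 *	...
 *	cache_put(&lnch);		unlock + drop
 *
 * or revalidates explicitly after a plain lock (gen and cred are
 * hypothetical locals):
 *
 *	cache_lock(&nch);
 *	cache_resolve(&nch, &gen, cred);
 *	...
 *	cache_unlock(&nch);
 */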
static void
_cache_lock(struct namecache *ncp)
{
	int didwarn = 0;
	int error;

	error = lockmgr(&ncp->nc_lock, LK_EXCLUSIVE);
	while (__predict_false(error == EWOULDBLOCK)) {
		if (didwarn == 0) {
			didwarn = ticks - nclockwarn;
			kprintf("[diagnostic] cache_lock: "
				"%s blocked on %p "
				"\"%*.*s\"\n",
				curthread->td_comm, ncp,
				ncp->nc_nlen, ncp->nc_nlen,
				ncp->nc_name);
		}
		error = lockmgr(&ncp->nc_lock, LK_EXCLUSIVE | LK_TIMELOCK);
	}
	if (__predict_false(didwarn)) {
		kprintf("[diagnostic] cache_lock: "
			"%s unblocked %*.*s after %d secs\n",
			curthread->td_comm,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
			(int)(ticks - didwarn) / hz);
	}
}
/*
 * Release a previously acquired lock.
 *
 * A concurrent shared-lock acquisition or acquisition/release can
 * race bit 31 so only drop the ncp if bit 31 was set.
 */
static void
_cache_unlock(struct namecache *ncp)
{
	lockmgr(&ncp->nc_lock, LK_RELEASE);
}
/*
 * Lock ncp exclusively, non-blocking.  Return 0 on success.
 */
static int
_cache_lock_nonblock(struct namecache *ncp)
{
	int error;

	error = lockmgr(&ncp->nc_lock, LK_EXCLUSIVE | LK_NOWAIT);
	if (__predict_false(error != 0)) {
		return (EWOULDBLOCK);
	}
	return (0);
}
/*
 * This is a special form of _cache_lock() which only succeeds if
 * it can get a pristine, non-recursive lock.  The caller must have
 * already ref'd the ncp.
 *
 * On success the ncp will be locked, on failure it will not.  The
 * ref count does not change either way.
 *
 * We want _cache_lock_special() (on success) to return a definitively
 * usable vnode or a definitively unresolved ncp.
 */
static int
_cache_lock_special(struct namecache *ncp)
{
	if (_cache_lock_nonblock(ncp) == 0) {
		if (lockmgr_oneexcl(&ncp->nc_lock)) {
			if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
				_cache_setunresolved(ncp, 1);
			return (0);
		}
		_cache_unlock(ncp);
	}
	return (EWOULDBLOCK);
}
/*
 * Shared lock, guarantees vp held
 *
 * The shared lock holds vp on the 0->1 transition.  It is possible to race
 * another shared lock release, preventing the other release from dropping
 * the vnode and clearing bit 31.
 *
 * If it is not set then we are responsible for setting it, and this
 * responsibility does not race with anyone else.
 */
static void
_cache_lock_shared(struct namecache *ncp)
{
	int didwarn = 0;
	int error;

	error = lockmgr(&ncp->nc_lock, LK_SHARED | LK_TIMELOCK);
	while (__predict_false(error == EWOULDBLOCK)) {
		if (didwarn == 0) {
			didwarn = ticks - nclockwarn;
			kprintf("[diagnostic] cache_lock_shared: "
				"%s blocked on %p "
				"\"%*.*s\"\n",
				curthread->td_comm, ncp,
				ncp->nc_nlen, ncp->nc_nlen,
				ncp->nc_name);
		}
		error = lockmgr(&ncp->nc_lock, LK_SHARED | LK_TIMELOCK);
	}
	if (__predict_false(didwarn)) {
		kprintf("[diagnostic] cache_lock_shared: "
			"%s unblocked %*.*s after %d secs\n",
			curthread->td_comm,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
			(int)(ticks - didwarn) / hz);
	}
}
/*
 * Shared lock, guarantees vp held.  Non-blocking.  Returns 0 on success,
 * non-zero on failure.
 */
static int
_cache_lock_shared_nonblock(struct namecache *ncp)
{
	int error;

	error = lockmgr(&ncp->nc_lock, LK_SHARED | LK_NOWAIT);
	if (__predict_false(error != 0)) {
		return (EWOULDBLOCK);
	}
	return (0);
}
/*
 * This function tries to get a shared lock but will back-off to an
 * exclusive lock if:
 *
 * (1) Some other thread is trying to obtain an exclusive lock
 *     (to prevent the exclusive requester from getting livelocked out
 *     by many shared locks).
 *
 * (2) The current thread already owns an exclusive lock (to avoid
 *     deadlocking).
 *
 * WARNING! On machines with lots of cores we really want to try hard to
 *	    get a shared lock or concurrent path lookups can chain-react
 *	    into a very high-latency exclusive lock.
 *
 *	    This is very evident in dsynth's initial scans.
 */
static int
_cache_lock_shared_special(struct namecache *ncp)
{
	/*
	 * Only honor a successful shared lock (returning 0) if there is
	 * no exclusive request pending and the vnode, if present, is not
	 * in a reclaimed state.
	 */
	if (_cache_lock_shared_nonblock(ncp) == 0) {
		if (__predict_true(!lockmgr_exclpending(&ncp->nc_lock))) {
			if (ncp->nc_vp == NULL ||
			    (ncp->nc_vp->v_flag & VRECLAIMED) == 0) {
				return (0);
			}
		}
		_cache_unlock(ncp);
		return (EWOULDBLOCK);
	}

	/*
	 * Non-blocking shared lock failed.  If we already own the exclusive
	 * lock just acquire another exclusive lock (instead of deadlocking).
	 * Otherwise acquire a shared lock.
	 */
	if (lockstatus(&ncp->nc_lock, curthread) == LK_EXCLUSIVE) {
		_cache_lock(ncp);
		return (0);
	}
	_cache_lock_shared(ncp);
	return (0);
}
/*
 * Returns:
 *	-1	Locked by other
 *	 0	Not locked
 *	(v)	LK_SHARED or LK_EXCLUSIVE
 */
static int
_cache_lockstatus(struct namecache *ncp)
{
	int status;

	status = lockstatus(&ncp->nc_lock, curthread);
	if (status == LK_EXCLOTHER)
		status = -1;
	return (status);
}
/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already deterministically at least 1, such as being
 * associated with e.g. a process, file descriptor, or some other entity.
 *
 * Only the above situations, similar situations within this module where
 * the ref count is deterministically at least 1, or when the ncp is found
 * via the nchpp (hash table) lookup, can bump nc_refs.
 *
 * Very specifically, a ncp found via nc_list CANNOT bump nc_refs.  It
 * can still be removed from the nc_list, however, as long as the caller
 * can acquire its lock (in the wrong order).
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't ourselves.
 */
static __inline struct namecache *
_cache_hold(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs > 0);
	atomic_add_int(&ncp->nc_refs, 1);

	return (ncp);
}
/*
 * Drop a cache entry.
 *
 * The 1->0 transition can only occur after or because the natural ref
 * is being dropped.  If another thread had a temporary ref during the
 * ncp's destruction, then that other thread might wind up being the
 * one to drop the last ref.
 */
static __inline void
_cache_drop(struct namecache *ncp)
{
	if (atomic_fetchadd_int(&ncp->nc_refs, -1) == 1) {
		KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);

		ncp->nc_refs = -1;	/* safety */
		if (ncp->nc_name)
			kfree(ncp->nc_name, M_VFSCACHEAUX);
		kfree_obj(ncp, M_VFSCACHE);
	}
}
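/*
 * Illustrative sketch (not part of the original file): the base refs
 * described at the top of this file track the linkages:
 *
 *	cache_alloc()			nc_refs = 1  (natural ref)
 *	_cache_link_parent()		natural ref moves to the nchpp hash
 *	_cache_setvp()			+1 ref       (v_namecache/neg_list)
 *	_cache_setunresolved()		-1 ref
 *	_cache_unlink_parent()		hash ref handed back to the caller
 *	_cache_drop() on 1->0		ncp is freed
 */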
/*
 * Link a new namecache entry to its parent and to the hash table.  Be
 * careful to avoid races if vhold() blocks in the future.
 *
 * Both ncp and par must be referenced and locked.  The reference is
 * transferred to the nchpp (and, most notably, NOT to the parent list).
 *
 * NOTE: The hash table spinlock is held across this call, we can't do
 *	 anything fancy.
 */
static void
_cache_link_parent(struct namecache *ncp, struct namecache *par,
		   struct nchash_head *nchpp)
{
	struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];

	KKASSERT(ncp->nc_parent == NULL);
	_cache_ncp_gen_enter(ncp);
	ncp->nc_parent = par;
	ncp->nc_head = nchpp;

	/*
	 * Set inheritance flags.  Note that the parent flags may be
	 * stale due to getattr potentially not having been run yet
	 * (it gets run during nlookup()'s).
	 */
	ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
	if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
		ncp->nc_flag |= NCF_SF_PNOCACHE;
	if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
		ncp->nc_flag |= NCF_UF_PCACHE;

	/*
	 * Add to hash table and parent, adjust accounting
	 */
	TAILQ_INSERT_HEAD(&nchpp->list, ncp, nc_hash);
	atomic_add_long(&pn->vfscache_count, 1);

	/*
	 * ncp is a new leaf being added to the tree
	 */
	if (TAILQ_EMPTY(&ncp->nc_list)) {
		atomic_add_long(&pn->vfscache_leafs, 1);
		if (ncp->nc_flag & NCF_UNRESOLVED)
			atomic_add_long(&pn->vfscache_unres, 1);
	}

	if (TAILQ_EMPTY(&par->nc_list)) {
		/*
		 * Parent was, but now is no longer a leaf
		 *
		 * XXX for now don't mess with par's gen, it causes
		 * unnecessary nlookup retries (though not many)
		 */
		/*_cache_ncp_gen_enter(par);*/
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		if (par->nc_parent) {
			if (par->nc_flag & NCF_UNRESOLVED)
				atomic_add_long(&pn->vfscache_unres, -1);
			atomic_add_long(&pn->vfscache_leafs, -1);
		}

		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
		/*_cache_ncp_gen_exit(par);*/
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
	_cache_hold(par);	/* add nc_parent ref */
	_cache_ncp_gen_exit(ncp);
}
/*
 * Remove the parent and hash associations from a namecache structure.
 * Drop the ref-count on the parent.  The caller receives the ref
 * from the ncp's nchpp linkage that was removed and may forward that
 * ref to a new linkage.
 *
 * The caller usually holds an additional ref on the ncp so the unlink
 * cannot be the final drop.  XXX should not be necessary now since the
 * caller receives the ref from the nchpp linkage, assuming the ncp
 * was linked in the first place.
 *
 * ncp must be locked, which means that there won't be any nc_parent
 * removal races.  This routine will acquire a temporary lock on
 * the parent as well as the appropriate hash chain.
 *
 * par must be locked and will remain locked on return.
 *
 * nchpp must be spin-locked.  This routine eats the spin-lock.
 */
static void
_cache_unlink_parent(struct namecache *par, struct namecache *ncp,
		     struct nchash_head *nchpp)
{
	struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
	struct vnode *dropvp;

	KKASSERT(ncp->nc_parent == par);
	dropvp = NULL;
	_cache_ncp_gen_enter(ncp);

	/* don't add a ref, we drop the nchpp ref later */

	/*
	 * Remove from hash table and parent, adjust accounting
	 */
	TAILQ_REMOVE(&ncp->nc_head->list, ncp, nc_hash);
	TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
	atomic_add_long(&pn->vfscache_count, -1);

	/*
	 * Removing leaf from tree
	 */
	if (TAILQ_EMPTY(&ncp->nc_list)) {
		if (ncp->nc_flag & NCF_UNRESOLVED)
			atomic_add_long(&pn->vfscache_unres, -1);
		atomic_add_long(&pn->vfscache_leafs, -1);
	}

	/*
	 * Parent is now a leaf?
	 */
	if (TAILQ_EMPTY(&par->nc_list)) {
		/*
		 * XXX for now don't mess with par's gen, it causes
		 * unnecessary nlookup retries (though not many)
		 */
		/*_cache_ncp_gen_enter(par);*/
		if (par->nc_parent) {
			if (par->nc_flag & NCF_UNRESOLVED)
				atomic_add_long(&pn->vfscache_unres, 1);
			atomic_add_long(&pn->vfscache_leafs, 1);
		}
		if (par->nc_vp)
			dropvp = par->nc_vp;
		/*_cache_ncp_gen_exit(par);*/
	}
	ncp->nc_parent = NULL;
	ncp->nc_head = NULL;
	spin_unlock(&nchpp->spin);
	_cache_drop(par);	/* drop ncp's nc_parent ref from (par) */

	/*
	 * We can only safely vdrop with no spinlocks held.
	 */
	if (dropvp)
		vdrop(dropvp);
	_cache_ncp_gen_exit(ncp);
}
/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 *
 * The returned ncp will be locked and referenced.  The ref is generally
 * meant to be transferred to the nchpp linkage.
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = kmalloc_obj(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHEAUX, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;		/* natural ref */
	ncp->nc_generation = 0;		/* link/unlink/res/unres op */
	TAILQ_INIT(&ncp->nc_list);
	lockinit(&ncp->nc_lock, "ncplk", hz, LK_CANRECURSE);
	lockmgr(&ncp->nc_lock, LK_EXCLUSIVE);

	return (ncp);
}
/*
 * Can only be called for the case where the ncp has never been
 * associated with anything (so no spinlocks are needed).
 */
static void
_cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1);

	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHEAUX);
	kfree_obj(ncp, M_VFSCACHE);
}
/*
 * [re]initialize a nchandle.
 */
void
cache_zero(struct nchandle *nch)
{
	nch->ncp = NULL;
	nch->mount = NULL;
}
/*
 * Ref and deref a nchandle structure (ncp + mp)
 *
 * The caller must specify a stable ncp pointer, typically meaning the
 * ncp is already referenced but this can also occur indirectly through
 * e.g. holding a lock on a direct child.
 *
 * WARNING: Caller may hold an unrelated read spinlock, which means we can't
 *	    use read spinlocks here.
 */
void
cache_hold(struct nchandle *nch)
{
	_cache_hold(nch->ncp);
	_cache_mntref(nch->mount);
}
/*
 * Create a copy of a namecache handle for an already-referenced
 * entry.
 */
void
cache_copy(struct nchandle *nch, struct nchandle *target)
{
	struct namecache *ncp;
	struct mntcache_elm *elm;
	struct namecache *ncpr;
	int i;

	ncp = nch->ncp;
	target->ncp = ncp;
	target->mount = nch->mount;

	elm = _cache_mntcache_hash(ncp);
	for (i = 0; i < MNTCACHE_SET; ++i) {
		if (elm->ncp == ncp) {
			ncpr = atomic_swap_ptr((void *)&elm->ncp, NULL);
			if (ncpr == ncp) {
				_cache_mntref(target->mount);
				return;
			}
			if (ncpr)
				_cache_drop(ncpr);
		}
		++elm;
	}
	if (ncp)
		_cache_hold(ncp);
	_cache_mntref(target->mount);
}
/*
 * Drop the nchandle, but try to cache the ref to avoid global atomic
 * ops.  This is typically done on the system root and jail root nchandles.
 */
void
cache_drop_and_cache(struct nchandle *nch, int elmno)
{
	struct mntcache_elm *elm;
	struct mntcache_elm *best;
	struct namecache *ncpr;
	int delta1;
	int delta2;
	int i;

	if (elmno > 4) {
		if (nch->ncp) {
			_cache_drop(nch->ncp);
			nch->ncp = NULL;
		}
		if (nch->mount) {
			_cache_mntrel(nch->mount);
			nch->mount = NULL;
		}
		return;
	}

	elm = _cache_mntcache_hash(nch->ncp);
	best = elm;
	for (i = 0; i < MNTCACHE_SET; ++i) {
		if (elm->ncp == NULL) {
			ncpr = atomic_swap_ptr((void *)&elm->ncp, nch->ncp);
			_cache_mntrel(nch->mount);
			elm->ticks = ticks;
			nch->mount = NULL;
			nch->ncp = NULL;
			if (ncpr)
				_cache_drop(ncpr);
			return;
		}
		delta1 = ticks - best->ticks;
		delta2 = ticks - elm->ticks;
		if (delta2 > delta1 || delta1 < -1 || delta2 < -1)
			best = elm;
		++elm;
	}
	ncpr = atomic_swap_ptr((void *)&best->ncp, nch->ncp);
	_cache_mntrel(nch->mount);
	best->ticks = ticks;
	nch->mount = NULL;
	nch->ncp = NULL;
	if (ncpr)
		_cache_drop(ncpr);
}
void
cache_changemount(struct nchandle *nch, struct mount *mp)
{
	_cache_mntref(mp);
	_cache_mntrel(nch->mount);
	nch->mount = mp;
}
void
cache_drop(struct nchandle *nch)
{
	_cache_mntrel(nch->mount);
	_cache_drop(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}
/*
 * Returns:
 *	-1	Locked by other
 *	 0	Not locked
 *	(v)	LK_SHARED or LK_EXCLUSIVE
 */
int
cache_lockstatus(struct nchandle *nch)
{
	return (_cache_lockstatus(nch->ncp));
}
void
cache_lock(struct nchandle *nch)
{
	_cache_lock(nch->ncp);
}
/*
 * Returns a shared or exclusive-locked ncp.  The ncp will only be
 * shared-locked if it is already resolved.
 */
void
cache_lock_maybe_shared(struct nchandle *nch, int excl)
{
	struct namecache *ncp = nch->ncp;

	if (ncp_shared_lock_disable || excl ||
	    (ncp->nc_flag & NCF_UNRESOLVED)) {
		_cache_lock(ncp);
	} else {
		_cache_lock_shared(ncp);
		if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
			if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
				_cache_unlock(ncp);
				_cache_lock(ncp);
			}
		} else {
			_cache_unlock(ncp);
			_cache_lock(ncp);
		}
	}
}
/*
 * Lock fncpd, fncp, tncpd, and tncp.  tncp is already locked but may
 * have to be cycled to avoid deadlocks.  Make sure all four are resolved.
 *
 * The caller is responsible for checking the validity upon return as
 * the records may have been flagged DESTROYED in the interim.
 *
 * Namecache lock ordering is leaf first, then parent.  However, complex
 * interactions may occur between the source and target because there is
 * no ordering guarantee between (fncpd, fncp) and (tncpd and tncp).
 */
void
cache_lock4_tondlocked(struct nchandle *fncpd, struct nchandle *fncp,
		       struct nchandle *tncpd, struct nchandle *tncp,
		       struct ucred *fcred, struct ucred *tcred)
{
	int tlocked = 1;
	u_int dummy_gen = 0;

	/*
	 * Lock tncp and tncpd
	 *
	 * NOTE: Because these ncps are not locked to begin with, it is
	 *	 possible for other rename races to cause the normal lock
	 *	 order assumptions to fail.
	 *
	 * NOTE: Lock ordering assumptions are valid if a leaf's parent
	 *	 matches after the leaf has been locked.  However, ordering
	 *	 between the 'from' and the 'to' is not and an overlapping
	 *	 lock order reversal is still possible.
	 */
again:
	if (__predict_false(tlocked == 0)) {
		cache_lock(tncp);
	}
	if (__predict_false(cache_lock_nonblock(tncpd) != 0)) {
		cache_unlock(tncp);
		cache_lock(tncpd);	/* cycle tncpd lock */
		cache_unlock(tncpd);
		tlocked = 0;
		goto again;
	}

	/*
	 * Lock fncp and fncpd
	 *
	 * NOTE: Because these ncps are not locked to begin with, it is
	 *	 possible for other rename races to cause the normal lock
	 *	 order assumptions to fail.
	 *
	 * NOTE: Lock ordering assumptions are valid if a leaf's parent
	 *	 matches after the leaf has been locked.  However, ordering
	 *	 between the 'from' and the 'to' is not and an overlapping
	 *	 lock order reversal is still possible.
	 */
	if (__predict_false(cache_lock_nonblock(fncp) != 0)) {
		cache_unlock(tncpd);
		cache_unlock(tncp);
		cache_lock(fncp);	/* cycle fncp lock */
		cache_unlock(fncp);
		tlocked = 0;
		goto again;
	}

	if (__predict_false(cache_lock_nonblock(fncpd) != 0)) {
		cache_unlock(fncp);
		cache_unlock(tncpd);
		cache_unlock(tncp);
		cache_lock(fncpd);
		cache_unlock(fncpd);	/* cycle fncpd lock */
		tlocked = 0;
		goto again;
	}

	if (__predict_true((fncpd->ncp->nc_flag & NCF_DESTROYED) == 0))
		cache_resolve(fncpd, &dummy_gen, fcred);
	if (__predict_true((tncpd->ncp->nc_flag & NCF_DESTROYED) == 0))
		cache_resolve(tncpd, &dummy_gen, tcred);
	if (__predict_true((fncp->ncp->nc_flag & NCF_DESTROYED) == 0))
		cache_resolve(fncp, &dummy_gen, fcred);
	if (__predict_true((tncp->ncp->nc_flag & NCF_DESTROYED) == 0))
		cache_resolve(tncp, &dummy_gen, tcred);
}
int
cache_lock_nonblock(struct nchandle *nch)
{
	return (_cache_lock_nonblock(nch->ncp));
}

void
cache_unlock(struct nchandle *nch)
{
	_cache_unlock(nch->ncp);
}
/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.
 *
 * We want cache_get() to return a definitively usable vnode or a
 * definitively unresolved ncp.
 */
static struct namecache *
_cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	_cache_lock(ncp);
	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
		_cache_setunresolved(ncp, 1);
	return (ncp);
}
/*
 * Attempt to obtain a shared lock on the ncp.  A shared lock will only
 * be obtained if the ncp is resolved and the vnode (if not ENOENT) is
 * valid.  Otherwise an exclusive lock will be acquired instead.
 */
static struct namecache *
_cache_get_maybe_shared(struct namecache *ncp, int excl)
{
	if (ncp_shared_lock_disable || excl ||
	    (ncp->nc_flag & NCF_UNRESOLVED))
	{
		return (_cache_get(ncp));
	}
	_cache_hold(ncp);
	_cache_lock_shared(ncp);
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
			_cache_unlock(ncp);
			_cache_drop(ncp);
			ncp = _cache_get(ncp);
		}
	} else {
		_cache_unlock(ncp);
		_cache_drop(ncp);
		ncp = _cache_get(ncp);
	}
	return (ncp);
}
/*
 * NOTE: The same nchandle can be passed for both arguments.
 */
void
cache_get(struct nchandle *nch, struct nchandle *target)
{
	KKASSERT(nch->ncp->nc_refs > 0);
	target->mount = nch->mount;
	target->ncp = _cache_get(nch->ncp);
	_cache_mntref(target->mount);
}
void
cache_get_maybe_shared(struct nchandle *nch, struct nchandle *target, int excl)
{
	KKASSERT(nch->ncp->nc_refs > 0);
	target->mount = nch->mount;
	target->ncp = _cache_get_maybe_shared(nch->ncp, excl);
	_cache_mntref(target->mount);
}
/*
 * Release a held and locked ncp
 */
static __inline void
_cache_put(struct namecache *ncp)
{
	_cache_unlock(ncp);
	_cache_drop(ncp);
}

void
cache_put(struct nchandle *nch)
{
	_cache_mntrel(nch->mount);
	_cache_put(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}
/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 */
static void
_cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp,
	     int adjgen)
{
	struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];

	KKASSERT((ncp->nc_flag & NCF_UNRESOLVED) &&
		 (_cache_lockstatus(ncp) == LK_EXCLUSIVE) &&
		 ncp->nc_vp == NULL);

	if (adjgen)
		_cache_ncp_gen_enter(ncp);

	if (vp) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		spin_lock(&vp->v_spin);
		ncp->nc_vp = vp;
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		++vp->v_namecache_count;
		_cache_hold(ncp);	/* v_namecache assoc */
		spin_unlock(&vp->v_spin);
		vhold(vp);		/* nc_vp */

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}

		ncp->nc_error = 0;

		/*
		 * XXX: this is a hack to work-around the lack of a real
		 * pfs vfs type
		 */
		if (strncmp(mp->mnt_stat.f_fstypename, "null", 5) == 0)
			vp->v_pfsmp = mp;
	} else {
		/*
		 * When creating a negative cache hit we set the
		 * namecache_gen.  A later resolve will clean out the
		 * negative cache hit if the mount point's namecache_gen
		 * has changed.  Used by devfs, could also be used by
		 * other remote FSs.
		 */
		ncp->nc_vp = NULL;
		ncp->nc_negcpu = mycpu->gd_cpuid;
		spin_lock(&pn->neg_spin);
		TAILQ_INSERT_TAIL(&pn->neg_list, ncp, nc_vnode);
		_cache_hold(ncp);	/* neg_list assoc */
		++pn->neg_count;
		spin_unlock(&pn->neg_spin);
		atomic_add_long(&pn->vfscache_negs, 1);

		ncp->nc_error = ENOENT;
		if (mp)
			VFS_NCPGEN_SET(mp, ncp);
	}

	/*
	 * Previously unresolved leaf is now resolved.
	 *
	 * Clear the NCF_UNRESOLVED flag last (see cache_nlookup_nonlocked()).
	 * We only adjust vfscache_unres for ncp's that are in the tree.
	 */
	if (TAILQ_EMPTY(&ncp->nc_list) && ncp->nc_parent)
		atomic_add_long(&pn->vfscache_unres, -1);
	ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);

	if (adjgen)
		_cache_ncp_gen_exit(ncp);
}

void
cache_setvp(struct nchandle *nch, struct vnode *vp)
{
	_cache_setvp(nch->mount, nch->ncp, vp, 1);
}
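/*
 * Illustrative sketch (not part of the original file): a VFS resolver
 * typically finishes with one of
 *
 *	cache_setvp(nch, vp);		positive hit
 *	cache_setvp(nch, NULL);		negative hit (ENOENT)
 *
 * after looking the name up in the underlying filesystem.
 */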
void
cache_settimeout(struct nchandle *nch, int nticks)
{
	struct namecache *ncp = nch->ncp;

	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}
/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 */
static void
_cache_setunresolved(struct namecache *ncp, int adjgen)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		struct pcpu_ncache *pn;

		if (adjgen)
			_cache_ncp_gen_enter(ncp);

		/*
		 * Is a resolved or destroyed leaf now becoming unresolved?
		 * Only adjust vfscache_unres for linked ncp's.
		 */
		if (TAILQ_EMPTY(&ncp->nc_list) && ncp->nc_parent) {
			pn = &pcpu_ncache[mycpu->gd_cpuid];
			atomic_add_long(&pn->vfscache_unres, 1);
		}

		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		if ((vp = ncp->nc_vp) != NULL) {
			spin_lock(&vp->v_spin);
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
			--vp->v_namecache_count;
			spin_unlock(&vp->v_spin);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with ncp
			 * is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			vdrop(vp);
		} else {
			struct pcpu_ncache *pn;

			pn = &pcpu_ncache[ncp->nc_negcpu];

			atomic_add_long(&pn->vfscache_negs, -1);
			spin_lock(&pn->neg_spin);
			TAILQ_REMOVE(&pn->neg_list, ncp, nc_vnode);
			--pn->neg_count;
			spin_unlock(&pn->neg_spin);
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);

		if (adjgen)
			_cache_ncp_gen_exit(ncp);
		_cache_drop(ncp);	/* from v_namecache or neg_list */
	}
}
/*
 * The cache_nresolve() code calls this function to automatically
 * set a resolved cache element to unresolved if it has timed out
 * or if it is a negative cache hit and the mount point namecache_gen
 * has changed.
 */
static __inline int
_cache_auto_unresolve_test(struct mount *mp, struct namecache *ncp)
{
	/*
	 * Try to zap entries that have timed out.  We have
	 * to be careful here because locked leafs may depend
	 * on the vnode remaining intact in a parent, so only
	 * do this under very specific conditions.
	 */
	if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
	    TAILQ_EMPTY(&ncp->nc_list)) {
		return 1;
	}

	/*
	 * If a resolved negative cache hit is invalid due to
	 * the mount's namecache generation being bumped, zap it.
	 */
	if (ncp->nc_vp == NULL && VFS_NCPGEN_TEST(mp, ncp)) {
		return 1;
	}

	/*
	 * Otherwise we are good
	 */
	return 0;
}
static __inline void
_cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
{
	/*
	 * Already in an unresolved state, nothing to do.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (_cache_auto_unresolve_test(mp, ncp))
			_cache_setunresolved(ncp, 1);
	}
}

void
cache_setunresolved(struct nchandle *nch)
{
	_cache_setunresolved(nch->ncp, 1);
}
/*
 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
 * looking for matches.  This flag tells the lookup code when it must
 * check for a mount linkage and also prevents the directories in question
 * from being deleted or renamed.
 */
static int
cache_clrmountpt_callback(struct mount *mp, void *data)
{
	struct nchandle *nch = data;

	if (mp->mnt_ncmounton.ncp == nch->ncp)
		return (1);
	if (mp->mnt_ncmountpt.ncp == nch->ncp)
		return (1);
	return (0);
}
/*
 * Clear NCF_ISMOUNTPT on nch->ncp if it is no longer associated
 * with a mount point.
 */
void
cache_clrmountpt(struct nchandle *nch)
{
	int count;

	count = mountlist_scan(cache_clrmountpt_callback, nch,
			       MNTSCAN_FORWARD | MNTSCAN_NOBUSY |
			       MNTSCAN_NOUNLOCK);
	if (count == 0)
		nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
}
/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be referenced and locked.  The routine may unlock
 * and relock ncp several times, and will recheck the children and loop
 * to catch races.  When done the passed ncp will be returned with the
 * reference and lock intact.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact, but
 * the nodes will be marked as having been destroyed and will be set
 * to an unresolved state.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* not have been re-resolved.
 *
 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
 * have to avoid blowing out the kernel stack.  We do this by saving the
 * deep namecache node and aborting the recursion, then re-recursing at that
 * node using a depth-first algorithm in order to allow multiple deep
 * recursions to chain through each other, then we restart the invalidation
 * from scratch.
 */
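/*
 * Illustrative sketch (not part of the original file): removing a
 * directory typically pushes the whole subtree through an invalidation
 * cycle:
 *
 *	cache_inval(&nch, CINV_DESTROY | CINV_CHILDREN);
 */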
struct cinvtrack {
	struct namecache *resume_ncp;
	int depth;
};

static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);
static int
_cache_inval(struct namecache *ncp, int flags)
{
	struct cinvtrack track;
	struct namecache *ncp2;
	int r;

	track.depth = 0;
	track.resume_ncp = NULL;

	for (;;) {
		r = _cache_inval_internal(ncp, flags, &track);
		if (track.resume_ncp == NULL)
			break;
		_cache_unlock(ncp);
		while ((ncp2 = track.resume_ncp) != NULL) {
			track.resume_ncp = NULL;
			_cache_lock(ncp2);
			_cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
					      &track);
			/*_cache_put(ncp2);*/
			_cache_unlock(ncp2);
			_cache_drop(ncp2);
		}
		_cache_lock(ncp);
	}
	return (r);
}

int
cache_inval(struct nchandle *nch, int flags)
{
	return (_cache_inval(nch->ncp, flags));
}
/*
 * Helper for _cache_inval().  The passed ncp is refd and locked and
 * remains that way on return, but may be unlocked/relocked multiple
 * times by the routine.
 */
static int
_cache_inval_internal(struct namecache *ncp, int flags,
		      struct cinvtrack *track)
{
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);

	_cache_ncp_gen_enter(ncp);
	_cache_setunresolved(ncp, 0);
	if (flags & CINV_DESTROY) {
		ncp->nc_flag |= NCF_DESTROYED;
	}

	while ((flags & CINV_CHILDREN) &&
	       (nextkid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		struct namecache *kid;

		_cache_hold(nextkid);
		if (++track->depth > MAX_RECURSION_DEPTH) {
			track->resume_ncp = ncp;
			_cache_hold(ncp);
			++rcnt;
		}
		while ((kid = nextkid) != NULL) {
			/*
			 * Parent (ncp) must be locked for the iteration.
			 */
			nextkid = NULL;
			if (kid->nc_parent != ncp) {
				_cache_drop(kid);
				kprintf("cache_inval_internal restartA %s\n",
					ncp->nc_name);
				break;
			}
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				_cache_hold(nextkid);

			/*
			 * Parent unlocked for this section to avoid
			 * deadlocks.  Then lock the kid and check for
			 * races.
			 */
			_cache_unlock(ncp);
			if (track->resume_ncp) {
				_cache_drop(kid);
				_cache_lock(ncp);
				break;
			}
			_cache_lock(kid);
			if (kid->nc_parent != ncp) {
				kprintf("cache_inval_internal "
					"restartB %s\n",
					ncp->nc_name);
				_cache_put(kid);
				_cache_lock(ncp);
				break;
			}
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				rcnt += _cache_inval_internal(kid,
						flags & ~CINV_DESTROY, track);
				/*_cache_unlock(kid);*/
				/*_cache_drop(kid);*/
				cache_zap(kid);
			} else {
				cache_zap(kid);
			}

			/*
			 * Relock parent to continue scan
			 */
			_cache_lock(ncp);
		}
		if (nextkid)
			_cache_drop(nextkid);
		--track->depth;
		if (track->resume_ncp)
			break;
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	_cache_ncp_gen_exit(ncp);

	return (rcnt);
}
/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
 *	 be ripped out of the topology while held, the vnode's v_namecache
 *	 list has no such restriction.  NCP's can be ripped out of the list
 *	 at virtually any time if not locked, even if held.
 *
 *	 In addition, the v_namecache list itself must be locked via
 *	 the vnode's spinlock.
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	spin_lock(&vp->v_spin);
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held and vp spin-locked */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock(&vp->v_spin);
		_cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto restart;
		}
		_cache_inval(ncp, flags);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock(&vp->v_spin);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock(&vp->v_spin);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto restart;
		}
	}
	spin_unlock(&vp->v_spin);
	return (TAILQ_FIRST(&vp->v_namecache) != NULL);
}
/*
 * This routine is used instead of the normal cache_inval_vp() when we
 * are trying to recycle otherwise good vnodes.
 *
 * Return 0 on success, non-zero if not all namecache records could be
 * disassociated from the vnode (for various reasons).
 */
int
cache_inval_vp_nonblock(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *next;

	spin_lock(&vp->v_spin);

	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);

	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock(&vp->v_spin);
		if (_cache_lock_nonblock(ncp)) {
			_cache_drop(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		_cache_inval(ncp, 0);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock(&vp->v_spin);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock(&vp->v_spin);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto done;
		}
	}
	spin_unlock(&vp->v_spin);
done:
	return (TAILQ_FIRST(&vp->v_namecache) != NULL);
}
/*
 * Attempt to quickly invalidate the vnode's namecache entry.  This function
 * will also dive the ncp and free its children but only if they are trivial.
 * All locks are non-blocking and the function will fail if required locks
 * cannot be obtained.
 *
 * We want this sort of function to be able to guarantee progress when vnlru
 * wants to recycle a vnode.  Directories could otherwise get stuck and not
 * be able to recycle due to destroyed or unresolved children in the
 * namecache.
 */
int
cache_inval_vp_quick(struct vnode *vp)
{
	struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];
	struct namecache *ncp;
	struct namecache *kid;

	spin_lock(&vp->v_spin);
	while ((ncp = TAILQ_FIRST(&vp->v_namecache)) != NULL) {
		_cache_hold(ncp);
		spin_unlock(&vp->v_spin);
		if (_cache_lock_nonblock(ncp)) {
			_cache_drop(ncp);
			return (EWOULDBLOCK);
		}

		/*
		 * Try to trivially destroy any children.
		 */
		while ((kid = TAILQ_FIRST(&ncp->nc_list)) != NULL) {
			struct nchash_head *nchpp;

			/*
			 * Early test without the lock.  Give-up if the
			 * child has children of its own, the child is
			 * positively-resolved, or the ref-count is
			 * unexpected.
			 */
			if (TAILQ_FIRST(&kid->nc_list) ||
			    kid->nc_vp ||
			    kid->nc_refs != ncpbaserefs(kid))
			{
				_cache_put(ncp);
				return (EWOULDBLOCK);
			}

			_cache_hold(kid);
			if (_cache_lock_nonblock(kid)) {
				_cache_drop(kid);
				_cache_put(ncp);
				return (EWOULDBLOCK);
			}

			/*
			 * A destruction/free test requires the parent,
			 * the kid, and the hash table to be locked.  Note
			 * that the kid may still be on the negative cache
			 * list.
			 */
			nchpp = kid->nc_head;
			spin_lock(&nchpp->spin);

			/*
			 * Give up if the child isn't trivial.  It can be
			 * resolved or unresolved but must not have a vp.
			 */
			if (kid->nc_parent != ncp ||
			    kid->nc_vp ||
			    TAILQ_FIRST(&kid->nc_list) ||
			    kid->nc_refs != 1 + ncpbaserefs(kid))
			{
				spin_unlock(&nchpp->spin);
				_cache_put(kid);
				_cache_put(ncp);
				return (EWOULDBLOCK);
			}

			++pn->inv_kid_quick_count;

			/*
			 * We can safely destroy the kid.  It may still
			 * have extra refs due to ncneglist races, but since
			 * we checked above with the lock held those races
			 * will self-resolve.
			 *
			 * With these actions the kid should nominally
			 * have just its natural ref plus our ref.
			 *
			 * This is only safe because we hold locks on
			 * the parent, the kid, and the nchpp.  The only
			 * lock we don't have is on the ncneglist and that
			 * can race a ref, but as long as we unresolve the
			 * kid before executing our final drop the ncneglist
			 * code path(s) will just drop their own ref so all
			 * is good.
			 */
			_cache_unlink_parent(ncp, kid, nchpp); /* eats nchpp */
			_cache_setunresolved(kid, 1);
			if (kid->nc_refs != 2) {
				kprintf("Warning: kid %p unexpected refs=%d "
					"%08x %s\n", kid, kid->nc_refs,
					kid->nc_flag, kid->nc_name);
			}
			_cache_put(kid);    /* drop our ref and lock */
			_cache_drop(kid);   /* drop natural ref to destroy */
		}

		/*
		 * Now check ncp itself against our expectations.  With
		 * no children left we have our ref plus whether it is
		 * resolved or not (which it has to be, actually, since it
		 * is hanging off the vp->v_namecache).
		 */
		if (ncp->nc_refs != 1 + ncpbaserefs(ncp)) {
			_cache_put(ncp);
			spin_lock(&vp->v_spin);
			break;
		}

		++pn->inv_ncp_quick_count;

		/*
		 * Success, disassociate and release the ncp.  Do not
		 * try to zap it here.
		 *
		 * NOTE: Releasing the ncp here leaves it in the tree,
		 *	 but since we have disassociated the vnode this
		 *	 ncp entry becomes 'trivial' and successive calls
		 *	 to cache_inval_vp_quick() will be able to continue
		 *	 to make progress.
		 */
		_cache_setunresolved(ncp, 1);
		_cache_put(ncp);
		spin_lock(&vp->v_spin);
	}
	spin_unlock(&vp->v_spin);

	return (TAILQ_FIRST(&vp->v_namecache) != NULL);
}
/*
 * Clears the universal directory search 'ok' flag.  This flag allows
 * nlookup() to bypass normal vnode checks.  This flag is a cached flag
 * so clearing it simply forces revalidation.
 */
void
cache_inval_wxok(struct vnode *vp)
{
	struct namecache *ncp;

	spin_lock(&vp->v_spin);
	TAILQ_FOREACH(ncp, &vp->v_namecache, nc_vnode) {
		if (ncp->nc_flag & (NCF_WXOK | NCF_NOTX))
			atomic_clear_short(&ncp->nc_flag, NCF_WXOK | NCF_NOTX);
	}
	spin_unlock(&vp->v_spin);
}
/*
 * The source ncp has been renamed to the target ncp.  All elements have been
 * locked, including the parent ncp's.
 *
 * The target ncp is destroyed (as a normal rename-over would destroy the
 * target file or directory).
 *
 * Because there may be references to the source ncp we cannot copy its
 * contents to the target.  Instead the source ncp is relinked as the target
 * and the target ncp is removed from the namecache topology.
 */
void
cache_rename(struct nchandle *fnch, struct nchandle *tnch)
{
	struct namecache *fncp = fnch->ncp;
	struct namecache *tncp = tnch->ncp;
	struct namecache *par;
	struct nchash_head *nchpp;
	u_int32_t hash;
	char *oname;
	char *nname;

	if (tncp->nc_nlen) {
		nname = kmalloc(tncp->nc_nlen + 1, M_VFSCACHEAUX, M_WAITOK);
		bcopy(tncp->nc_name, nname, tncp->nc_nlen);
		nname[tncp->nc_nlen] = 0;
	} else {
		nname = NULL;
	}

	/*
	 * Rename fncp (unlink)
	 */
	if (fncp->nc_parent) {
		par = fncp->nc_parent;
		_cache_hold(par);
		_cache_lock(par);
		nchpp = fncp->nc_head;
		spin_lock(&nchpp->spin);
		_cache_unlink_parent(par, fncp, nchpp); /* eats nchpp */
		_cache_put(par);
	} else {
		par = NULL;
		nchpp = NULL;
	}
	oname = fncp->nc_name;
	fncp->nc_name = nname;
	fncp->nc_nlen = tncp->nc_nlen;
	if (oname)
		kfree(oname, M_VFSCACHEAUX);

	par = tncp->nc_parent;
	KKASSERT(par->nc_lock.lk_lockholder == curthread);

	/*
	 * Rename fncp (relink)
	 */
	hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT);
	hash = fnv_32_buf(&par, sizeof(par), hash);
	nchpp = NCHHASH(hash);

	spin_lock(&nchpp->spin);
	_cache_link_parent(fncp, par, nchpp);
	spin_unlock(&nchpp->spin);

	/*
	 * Get rid of the overwritten tncp (unlink)
	 */
	_cache_unlink(tncp);
}
/*
 * Perform actions consistent with unlinking a file.  The passed-in ncp
 * must be locked.
 *
 * The ncp is marked DESTROYED so it no longer shows up in searches,
 * and will be physically deleted when the vnode goes away.
 *
 * If the related vnode has no refs then we cycle it through vget()/vput()
 * to (possibly if we don't have a ref race) trigger a deactivation,
 * allowing the VFS to trivially detect and recycle the deleted vnode
 * via VOP_INACTIVE().
 *
 * NOTE: _cache_rename() will automatically call _cache_unlink() on the
 *	 target ncp.
 */
void
cache_unlink(struct nchandle *nch)
{
	_cache_unlink(nch->ncp);
}

static void
_cache_unlink(struct namecache *ncp)
{
	struct vnode *vp;

	/*
	 * Causes lookups to fail and allows another ncp with the same
	 * name to be created under ncp->nc_parent.
	 */
	_cache_ncp_gen_enter(ncp);
	ncp->nc_flag |= NCF_DESTROYED;

	/*
	 * Attempt to trigger a deactivation.  Set VREF_FINALIZE to
	 * force action on the 1->0 transition.  Do not destroy the
	 * vp association if a vp is present (leave the destroyed ncp
	 * resolved through the vp finalization).
	 *
	 * Cleanup the refs in the resolved-not-found case by setting
	 * the ncp to an unresolved state.  This improves our ability
	 * to get rid of dead ncp elements in other cache_*() routines.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		vp = ncp->nc_vp;
		if (vp) {
			atomic_set_int(&vp->v_refcnt, VREF_FINALIZE);
			if (VREFCNT(vp) <= 0) {
				if (vget(vp, LK_SHARED) == 0)
					vput(vp);
			}
		} else {
			_cache_setunresolved(ncp, 0);
		}
	}
	_cache_ncp_gen_exit(ncp);
}
/*
 * Return non-zero if the nch might be associated with an open and/or mmap()'d
 * file.  The easy solution is to just return non-zero if the vnode has refs.
 * Used to interlock hammer2 reclaims (VREF_FINALIZE should already be set to
 * force the reclaim).
 */
int
cache_isopen(struct nchandle *nch)
{
	struct vnode *vp;
	struct namecache *ncp = nch->ncp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
	    (vp = ncp->nc_vp) != NULL &&
	    VREFCNT(vp))
	{
		return 1;
	}
	return 0;
}
/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary.  The passed ncp must be referenced and locked.  If
 * the ncp is resolved it might be locked shared.
 *
 * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.  The
 * most typical error is ENOENT, meaning that the ncp represents a negative
 * cache hit and there is no vnode to retrieve, but other errors can occur
 * too.
 *
 * The vget() can race a reclaim.  If this occurs we re-resolve the
 * namecache entry.
 *
 * There are numerous places in the kernel where vget() is called on a
 * vnode while one or more of its namecache entries is locked.  Releasing
 * a vnode never deadlocks against locked namecache entries (the vnode
 * will not get recycled while referenced ncp's exist).  This means we
 * can safely acquire the vnode.  In fact, we MUST NOT release the ncp
 * lock when acquiring the vp lock or we might cause a deadlock.
 *
 * NOTE: The passed-in ncp must be locked exclusively if it is initially
 *	 unresolved.  If a reclaim race occurs the passed-in ncp will be
 *	 relocked exclusively before being re-resolved.
 */
int
cache_vget(struct nchandle *nch, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;
	u_int dummy_gen = 0;

	ncp = nch->ncp;
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, &dummy_gen, cred);
	else
		error = 0;

	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, lk_type);
		if (error) {
			/*
			 * The ncp may have been locked shared, we must relock
			 * it exclusively before we can set it to unresolved.
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vget on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_unlock(ncp);
				_cache_lock(ncp);
				_cache_setunresolved(ncp, 1);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
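
/*
 * Illustrative sketch (hypothetical caller): typical cache_vget() usage
 * from code holding a referenced, locked nchandle, e.g. out of an
 * nlookupdata.  Only cache_vget()/vput() are real APIs here.
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = cache_vget(&nd->nl_nch, nd->nl_cred, LK_SHARED, &vp);
 *	if (error == 0) {
 *		// vp is referenced and share-locked
 *		// ... use vp ...
 *		vput(vp);		// releases lock + ref
 *	}
 *	// error == ENOENT typically indicates a negative cache hit
 */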
/*
 * Similar to cache_vget() but only acquires a ref on the vnode.  The vnode
 * is already held by virtue of the ncp being locked, but it might not be
 * referenced and while it is not referenced it can transition into the
 * VRECLAIMED state.
 *
 * NOTE: The passed-in ncp must be locked exclusively if it is initially
 *	 unresolved.  If a reclaim race occurs the passed-in ncp will be
 *	 relocked exclusively before being re-resolved.
 *
 * NOTE: At the moment we have to issue a vget() on the vnode, even though
 *	 we are going to immediately release the lock, in order to resolve
 *	 potential reclamation races.  Once we have a solid vnode ref that
 *	 was (at some point) interlocked via a vget(), the vnode will not
 *	 be reclaimed out from under us.
 *
 * NOTE: vhold counts (v_auxrefs) do not prevent reclamation.
 */
int
cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;
	int v;
	u_int dummy_gen = 0;

	ncp = nch->ncp;
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, &dummy_gen, cred);
	else
		error = 0;

	while (error == 0 && (vp = ncp->nc_vp) != NULL) {
		/*
		 * Try a lockless ref of the vnode.  VRECLAIMED transitions
		 * use the vx_lock state and update-counter mechanism so we
		 * can detect if one is in-progress or occurred.
		 *
		 * If we can successfully ref the vnode and interlock against
		 * the update-counter mechanism, and VRECLAIMED is found to
		 * not be set after that, we should be good.
		 */
		v = spin_access_start_only(&vp->v_spin);
		if (__predict_true(spin_access_check_inprog(v) == 0)) {
			vref_special(vp);
			if (__predict_false(
				    spin_access_end_only(&vp->v_spin, v))) {
				vrele(vp);
				continue;
			}
			if (__predict_true((vp->v_flag & VRECLAIMED) == 0))
				break;
			vrele(vp);
			kprintf("CACHE_VREF: IN-RECLAIM\n");
		}

		/*
		 * Do it the slow way
		 */
		error = vget(vp, LK_SHARED);
		if (error) {
			/*
			 * The ncp may have been locked shared, we must relock
			 * it exclusively before we can set it to unresolved.
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vget on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_unlock(ncp);
				_cache_lock(ncp);
				_cache_setunresolved(ncp, 1);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
			/* caller does not want a lock */
			vn_unlock(vp);
		}
		break;
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
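
/*
 * The lockless path above is an instance of a general spin_access
 * sequence.  A minimal sketch of the pattern, assuming a field guarded
 * by a spinlock's update counter (names generic, not from this file):
 *
 *	int v;
 *
 *	do {
 *		v = spin_access_start_only(&obj->spin); // snapshot counter
 *		if (spin_access_check_inprog(v))
 *			break;		// writer active, take slow path
 *		value = obj->field;	// speculative read
 *	} while (spin_access_end_only(&obj->spin, v)); // retry on change
 *
 * spin_access_end_only() returning non-zero means a writer ran during
 * the read, so any side effects (the vref above) must be undone before
 * retrying.
 */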
/*
 * Return a referenced vnode representing the parent directory of
 * ncp.
 *
 * Because the caller has locked the ncp it should not be possible for
 * the parent ncp to go away.  However, the parent can unresolve its
 * dvp at any time so we must be able to acquire a lock on the parent
 * to safely access nc_vp.
 *
 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
 * so use vhold()/vdrop() while holding the lock to prevent dvp from
 * getting destroyed.
 *
 * NOTE: vhold() is allowed when dvp has 0 refs if we hold a
 *	 lock on the ncp in question.
 */
static struct vnode *
cache_dvpref(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dvp;

	dvp = NULL;
	if ((par = ncp->nc_parent) != NULL) {
		_cache_hold(par);
		_cache_lock(par);
		if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
			if ((dvp = par->nc_vp) != NULL)
				vhold(dvp);
		}
		_cache_unlock(par);
		if (dvp) {
			if (vget(dvp, LK_SHARED) == 0) {
				vn_unlock(dvp);
				vdrop(dvp);
				/* return refd, unlocked dvp */
			} else {
				vdrop(dvp);
				dvp = NULL;
			}
		}
		_cache_drop(par);
	}
	return(dvp);
}
/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 *
 * To avoid underflowing the kernel stack each recursive call increments
 * the makeit variable.
 */

static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
				  struct vnode *dvp, char *fakename);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
			     struct vnode **saved_dvp);

int
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
	      struct nchandle *nch)
{
	struct vnode *saved_dvp;
	struct vnode *pvp;
	char *fakename;
	int error;

	nch->ncp = NULL;
	nch->mount = dvp->v_mount;
	saved_dvp = NULL;
	fakename = NULL;

	/*
	 * Handle the makeit == 0 degenerate case
	 */
	if (makeit == 0) {
		spin_lock_shared(&dvp->v_spin);
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		if (nch->ncp)
			cache_hold(nch);
		spin_unlock_shared(&dvp->v_spin);
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while (makeit) {
		/*
		 * Break out if we successfully acquire a working ncp.
		 */
		spin_lock_shared(&dvp->v_spin);
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		if (nch->ncp) {
			cache_hold(nch);
			spin_unlock_shared(&dvp->v_spin);
			break;
		}
		spin_unlock_shared(&dvp->v_spin);

		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch->mount, 1);
			_cache_put(nch->ncp);
			if (ncvp_debug & 1) {
				kprintf("cache_fromdvp: resolve root of "
					"mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug & 1)
					kprintf(" failed\n");
				nch->ncp = NULL;
				break;
			}
			if (ncvp_debug & 1)
				kprintf(" succeeded\n");
			continue;
		}

		/*
		 * If we are recursed too deeply resort to an O(n^2)
		 * algorithm to resolve the namecache topology.  The
		 * resolved pvp is left referenced in saved_dvp to
		 * prevent the tree from being destroyed while we loop.
		 */
		if (makeit > 20) {
			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
			if (error) {
				kprintf("lookupdotdot(longpath) failed %d "
					"dvp %p\n", error, dvp);
				nch->ncp = NULL;
				break;
			}
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
			break;
		}
		vn_unlock(pvp);

		/*
		 * Reuse makeit as a recursion depth counter.  On success
		 * nch will be fully referenced.
		 */
		cache_fromdvp(pvp, cred, makeit + 1, nch);
		vrele(pvp);
		if (nch->ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
		 */
		error = cache_inefficient_scan(nch, cred, dvp, fakename);
		if (error) {
			kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, nch->ncp->nc_name, dvp);
			cache_drop(nch);
			/* nch was NULLed out, reload mount */
			nch->mount = dvp->v_mount;
			break;
		}
		if (ncvp_debug & 1) {
			kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, nch->ncp->nc_name);
		}
		cache_drop(nch);
		/* nch was NULLed out, reload mount */
		nch->mount = dvp->v_mount;
	}

	/*
	 * If nch->ncp is non-NULL it will have been held already.
	 */
	if (fakename)
		kfree(fakename, M_TEMP);
	if (saved_dvp)
		vrele(saved_dvp);
	if (nch->ncp)
		return (0);
	return (ENOENT);
}
/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static
int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
		  struct vnode **saved_dvp)
{
	struct nchandle nch;
	struct vnode *pvp;
	char *fakename;
	int error;
	static time_t last_fromdvp_report;

	/*
	 * Loop getting the parent directory vnode until we get something we
	 * can resolve in the namecache.
	 */
	vref(dvp);
	nch.mount = dvp->v_mount;
	nch.ncp = NULL;
	fakename = NULL;

	for (;;) {
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			vrele(dvp);
			break;
		}
		vn_unlock(pvp);
		spin_lock_shared(&pvp->v_spin);
		if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
			_cache_hold(nch.ncp);
			spin_unlock_shared(&pvp->v_spin);
			vrele(pvp);
			break;
		}
		spin_unlock_shared(&pvp->v_spin);
		if (pvp->v_flag & VROOT) {
			nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch.mount, 1);
			_cache_unlock(nch.ncp);
			vrele(pvp);
			if (error) {
				_cache_drop(nch.ncp);
				nch.ncp = NULL;
				vrele(dvp);
			}
			break;
		}
		vrele(dvp);
		dvp = pvp;
	}
	if (error == 0) {
		if (last_fromdvp_report != time_uptime) {
			last_fromdvp_report = time_uptime;
			kprintf("Warning: extremely inefficient path "
				"resolution on %s\n",
				nch.ncp->nc_name);
		}
		error = cache_inefficient_scan(&nch, cred, dvp, fakename);

		/*
		 * Hopefully dvp now has a namecache record associated with
		 * it.  Leave it referenced to prevent the kernel from
		 * recycling the vnode.  Otherwise extremely long directory
		 * paths could result in endless recycling.
		 */
		if (*saved_dvp)
			vrele(*saved_dvp);
		*saved_dvp = dvp;
		_cache_drop(nch.ncp);
	}
	if (fakename)
		kfree(fakename, M_TEMP);
	return (error);
}
/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged opportunistically,
 * or (2) Make the NFS server backtrack and scan to recover a connected
 * namecache topology in order to then be able to issue new API lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every subsystem
 * that calls into the namecache to deal with the re-merge case, especially
 * since we are using the namecache to placehold negative lookups and the
 * vnode might not be immediately assigned.  (2) is certainly far less
 * efficient than (1), but since we are only talking about directories here
 * (which are likely to remain cached), the case does not actually run all
 * that often and has the supreme advantage of not polluting the namecache
 * algorithms.
 *
 * If a fakename is supplied just construct a namecache entry using the
 * fake name.
 */
static int
cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
		       struct vnode *dvp, char *fakename)
{
	struct nlcomponent nlc;
	struct nchandle rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	cache_lock(nch);
	error = cache_vref(nch, cred, &pvp);
	cache_unlock(nch);
	if (error)
		return (error);
	if (ncvp_debug & 1) {
		kprintf("inefficient_scan of (%p,%s): directory iosize %ld "
			"vattr fileid = %lld\n",
			nch->ncp, nch->ncp->nc_name,
			vat.va_blocksize,
			(long long)vat.va_fileid);
	}

	/*
	 * Use the supplied fakename if not NULL.  Fake names are typically
	 * not in the actual filesystem hierarchy.  This is used by HAMMER
	 * to glue @@timestamp recursions together.
	 */
	if (fakename) {
		nlc.nlc_nameptr = fakename;
		nlc.nlc_namelen = strlen(fakename);
		rncp = cache_nlookup(nch, &nlc);
		goto done;
	}

	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
	rncp.ncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
again:
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (ncvp_debug & 2)
		kprintf("cache_inefficient_scan: readdir @ %08x\n",
			(int)uio.uio_offset);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
	if (error == 0) {
		den = (struct dirent *)rbuf;
		bytes = blksize - uio.uio_resid;

		while (bytes > 0) {
			if (ncvp_debug & 2) {
				kprintf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_ino == vat.va_fileid) {
				if (ncvp_debug & 1) {
					kprintf("cache_inefficient_scan: "
						"MATCHED inode %lld path %s/%*.*s\n",
						(long long)vat.va_fileid,
						nch->ncp->nc_name,
						den->d_namlen, den->d_namlen,
						den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				rncp = cache_nlookup(nch, &nlc);
				KKASSERT(rncp.ncp != NULL);
				break;
			}
			bytes -= _DIRENT_DIRSIZ(den);
			den = _DIRENT_NEXT(den);
		}
		if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	kfree(rbuf, M_TEMP);
done:
	vrele(pvp);
	if (rncp.ncp) {
		if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
			_cache_setvp(rncp.mount, rncp.ncp, dvp, 1);
			if (ncvp_debug & 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug & 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
					rncp.ncp->nc_vp);
			}
		}
		if (rncp.ncp->nc_vp == NULL)
			error = rncp.ncp->nc_error;
		/*
		 * Release rncp after a successful nlookup.  rncp was fully
		 * referenced.
		 */
		cache_put(&rncp);
	} else {
		kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, nch->ncp->nc_name);
		error = ENOENT;
	}
	return (error);
}
/*
 * This function must be called with the ncp held and locked and will unlock
 * and drop it during zapping.
 *
 * Zap a namecache entry.  The ncp is unconditionally set to an unresolved
 * state, which disassociates it from its vnode or pcpu_ncache[n].neg_list
 * and removes the related reference.  If the ncp can be removed, and the
 * parent can be zapped non-blocking, this function loops up.
 *
 * There will be one ref from the caller (which we now own).  The only
 * remaining autonomous refs to the ncp will then be due to nc_parent->nc_list,
 * so possibly 2 refs left.  Taking this into account, if there are no
 * additional refs and no children, the ncp will be removed from the topology
 * and destroyed.
 *
 * References and/or children may exist if the ncp is in the middle of the
 * topology, preventing the ncp from being destroyed.
 *
 * If nonblock is non-zero and the parent ncp cannot be locked we give up.
 *
 * Looping up the parent chain, rather than recursing, is one way to avoid
 * unbounded recursion due to deep namecache trees.
 *
 * WARNING! For MPSAFE operation this routine must acquire up to three
 *	    spin locks to be able to safely test nc_refs.  Lock order is
 *	    very important.
 *
 *	    hash spinlock if on hash list
 *	    parent spinlock if child of parent
 *	    (the ncp is unresolved so there is no vnode association)
 */
static int
cache_zap(struct namecache *ncp)
{
	struct namecache *par;
	struct nchash_head *nchpp;
	int refcmp;
	int nonblock = 1;	/* XXX cleanup */

again:
	/*
	 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
	 * This gets rid of any vp->v_namecache list or negative list and
	 * the related ref.
	 */
	_cache_setunresolved(ncp, 1);

	/*
	 * Try to scrap the entry and possibly tail-recurse on its parent.
	 * We only scrap unref'd (other than our ref) unresolved entries,
	 * we do not scrap 'live' entries.
	 *
	 * If nc_parent is non NULL we expect 2 references, else just 1.
	 * If there are more, someone else also holds the ncp and we cannot
	 * destroy it.
	 */
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
	KKASSERT(ncp->nc_refs > 0);

	/*
	 * If the ncp is linked to its parent it will also be in the hash
	 * table.  We have to be able to lock the parent and the hash table.
	 *
	 * Acquire locks.  Note that the parent can't go away while we hold
	 * a child locked.  If nc_parent is present, expect 2 refs instead
	 * of 1.
	 */
	nchpp = NULL;
	if ((par = ncp->nc_parent) != NULL) {
		if (nonblock) {
			if (_cache_lock_nonblock(par)) {
				/* lock failed, defer the zap */
				ncp->nc_flag |= NCF_DEFEREDZAP;
				atomic_add_long(
				    &pcpu_ncache[mycpu->gd_cpuid].numdefered,
				    1);
				_cache_unlock(ncp);
				_cache_drop(ncp);	/* caller's ref */
				return 0;
			}
			_cache_hold(par);
		} else {
			_cache_hold(par);
			_cache_lock(par);
		}
		nchpp = ncp->nc_head;
		spin_lock(&nchpp->spin);
	}

	/*
	 * With the parent and nchpp locked, and the vnode removed
	 * (no vp->v_namecache), we expect 1 or 2 refs.  If there are
	 * more someone else has a ref and we cannot zap the entry.
	 *
	 * one for our hold
	 * one for our parent link (parent also has one from the linkage)
	 */
	if (par)
		refcmp = 2;
	else
		refcmp = 1;

	/*
	 * On failure undo the work we've done so far and drop the
	 * caller's ref and ncp.
	 */
	if (ncp->nc_refs != refcmp || TAILQ_FIRST(&ncp->nc_list)) {
		if (par) {
			spin_unlock(&nchpp->spin);
			_cache_put(par);
		}
		_cache_unlock(ncp);
		_cache_drop(ncp);
		return 0;
	}

	/*
	 * We own all the refs and with the spinlocks held no further
	 * refs can be acquired by others.
	 *
	 * Remove us from the hash list and parent list.  We have to
	 * drop a ref on the parent's vp if the parent's list becomes
	 * empty.
	 */
	if (par) {
		KKASSERT(nchpp == ncp->nc_head);
		_cache_unlink_parent(par, ncp, nchpp); /* eats nchpp */
		/*_cache_unlock(par);*/
		/* &nchpp->spin is unlocked by call */
	} else {
		KKASSERT(ncp->nc_head == NULL);
	}

	/*
	 * ncp should not have picked up any refs.  Physically
	 * destroy the ncp.
	 */
	if (ncp->nc_refs != refcmp) {
		panic("cache_zap: %p bad refs %d (expected %d)\n",
			ncp, ncp->nc_refs, refcmp);
	}
	/* _cache_unlock(ncp) not required */
	ncp->nc_refs = -1;	/* safety */
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHEAUX);
	kfree_obj(ncp, M_VFSCACHE);

	/*
	 * Loop up if we can recursively clean out the parent.
	 */
	if (par) {
		refcmp = 1;			/* ref on parent */
		if (par->nc_parent)		/* par->par */
			++refcmp;
		par->nc_flag &= ~NCF_DEFEREDZAP;
		if ((par->nc_flag & NCF_UNRESOLVED) &&
		    par->nc_refs == refcmp &&
		    TAILQ_EMPTY(&par->nc_list))
		{
			ncp = par;
			goto again;
		}
		_cache_unlock(par);
		_cache_drop(par);
	}
	return 1;
}
/*
 * Clean up dangling negative cache and defered-drop entries in the
 * namecache.
 *
 * This routine is called in the critical path and also called from
 * vnlru().  When called from vnlru we use a lower limit to try to
 * deal with the negative cache before the critical path has to start
 * dealing with it.
 */
typedef enum { CHI_LOW, CHI_HIGH } cache_hs_t;

static cache_hs_t neg_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW };
static cache_hs_t pos_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW };
static cache_hs_t exc_cache_hysteresis_state[2] = { CHI_LOW, CHI_LOW };

static int cache_hyst_run[2];

void
cache_hysteresis(int critpath)
{
	long poslimit;
	long neglimit;
	long exclimit;
	long xnumunres;
	long xnumleafs;
	long clean_neg;
	long clean_unres;
	long clean_excess;

	/*
	 * Lets not compete for running a general garbage collection
	 */
	if (atomic_swap_int(&cache_hyst_run[critpath], 1) != 0)
		return;

	/*
	 * Calculate negative ncp limit
	 */
	neglimit = maxvnodes / ncnegfactor;
	if (critpath == 0)
		neglimit = neglimit * 8 / 10;

	/*
	 * Don't cache too many negative hits.  We use hysteresis to reduce
	 * the impact on the critical path.
	 */
	clean_neg = 0;

	switch(neg_cache_hysteresis_state[critpath]) {
	case CHI_LOW:
		if (vfscache_negs > MINNEG && vfscache_negs > neglimit) {
			if (critpath)
				clean_neg = ncnegflush;
			else
				clean_neg = ncnegflush +
					    vfscache_negs - neglimit;
			neg_cache_hysteresis_state[critpath] = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (vfscache_negs > MINNEG * 9 / 10 &&
		    vfscache_negs * 9 / 10 > neglimit
		) {
			if (critpath)
				clean_neg = ncnegflush;
			else
				clean_neg = ncnegflush +
					    vfscache_negs * 9 / 10 -
					    neglimit;
		} else {
			neg_cache_hysteresis_state[critpath] = CHI_LOW;
		}
		break;
	}
	if (clean_neg)
		_cache_cleanneg(clean_neg);

	/*
	 * Don't cache too many unresolved elements.  We use hysteresis to
	 * reduce the impact on the critical path.
	 */
	if ((poslimit = ncposlimit) == 0)
		poslimit = maxvnodes / ncposfactor;
	if (critpath == 0)
		poslimit = poslimit * 8 / 10;

	/*
	 * Number of unresolved leaf elements in the namecache.  These
	 * can build-up for various reasons and may have to be disposed
	 * of to allow the inactive list to be cleaned out by vnlru_proc().
	 */
	clean_unres = 0;
	xnumunres = vfscache_unres;

	switch(pos_cache_hysteresis_state[critpath]) {
	case CHI_LOW:
		if (xnumunres > poslimit && xnumunres > MINPOS) {
			if (critpath)
				clean_unres = ncposflush;
			else
				clean_unres = ncposflush + xnumunres -
					      poslimit;
			pos_cache_hysteresis_state[critpath] = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (xnumunres > poslimit * 5 / 6 && xnumunres > MINPOS) {
			if (critpath)
				clean_unres = ncposflush;
			else
				clean_unres = ncposflush + xnumunres -
					      poslimit * 5 / 6;
		} else {
			pos_cache_hysteresis_state[critpath] = CHI_LOW;
		}
		break;
	}

	/*
	 * Excessive positive hits can accumulate due to large numbers of
	 * hardlinks (the vnode cache will not prevent ncps representing
	 * hardlinks from growing into infinity).
	 */
	exclimit = maxvnodes * 2;
	if (critpath == 0)
		exclimit = exclimit * 8 / 10;

	clean_excess = 0;
	xnumleafs = vfscache_leafs;

	switch(exc_cache_hysteresis_state[critpath]) {
	case CHI_LOW:
		if (xnumleafs > exclimit && xnumleafs > MINPOS) {
			if (critpath)
				clean_excess = ncposflush;
			else
				clean_excess = ncposflush + xnumleafs -
					       exclimit;
			exc_cache_hysteresis_state[critpath] = CHI_HIGH;
		}
		break;
	case CHI_HIGH:
		if (xnumleafs > exclimit * 5 / 6 && xnumleafs > MINPOS) {
			if (critpath)
				clean_excess = ncposflush;
			else
				clean_excess = ncposflush + xnumleafs -
					       exclimit * 5 / 6;
		} else {
			exc_cache_hysteresis_state[critpath] = CHI_LOW;
		}
		break;
	}

	if (clean_unres || clean_excess)
		_cache_cleanpos(clean_unres, clean_excess);

	/*
	 * Clean out dangling defered-zap ncps which could not be cleanly
	 * dropped if too many build up.  Note that numdefered is
	 * heuristical.  Make sure we are real-time for the current cpu,
	 * plus the global rollup.
	 */
	if (pcpu_ncache[mycpu->gd_cpuid].numdefered + numdefered > neglimit) {
		_cache_cleandefered();
	}
	atomic_swap_int(&cache_hyst_run[critpath], 0);
}
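
/*
 * The three state machines above share one shape.  A minimal sketch of
 * the two-threshold hysteresis pattern (generic names, not kernel API):
 *
 *	switch(state) {
 *	case CHI_LOW:
 *		if (level > limit) {		// trip at 100% of limit
 *			clean = flush_quantum + excess;
 *			state = CHI_HIGH;
 *		}
 *		break;
 *	case CHI_HIGH:
 *		if (level > limit * 5 / 6)	// keep cleaning until ~83%
 *			clean = flush_quantum + excess;
 *		else
 *			state = CHI_LOW;	// re-arm below the low mark
 *		break;
 *	}
 *
 * The gap between the trip point and the re-arm point keeps the critical
 * path from flapping between "clean" and "don't clean" on every call.
 */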
/*
 * NEW NAMECACHE LOOKUP API
 *
 * Lookup an entry in the namecache.  The passed par_nch must be referenced
 * and unlocked.  A referenced and locked nchandle with a non-NULL nch.ncp
 * is ALWAYS returned, even if the supplied component is illegal.
 *
 * The resulting namecache entry should be returned to the system with
 * cache_put() or cache_unlock() + cache_drop().
 *
 * namecache locks are recursive but care must be taken to avoid lock order
 * reversals (hence why the passed par_nch must be unlocked).  Locking
 * rules are to order for parent traversals, not for child traversals.
 *
 * Nobody else will be able to manipulate the associated namespace (e.g.
 * create, delete, rename, rename-target) until the caller unlocks the
 * entry.
 *
 * The returned entry will be in one of three states: positive hit (non-null
 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
 * Unresolved entries must be resolved through the filesystem to associate the
 * vnode and/or determine whether a positive or negative hit has occurred.
 *
 * It is not necessary to lock a directory in order to lock namespace under
 * that directory.  In fact, it is explicitly not allowed to do that.  A
 * directory is typically only locked when being created, renamed, or
 * destroyed.
 *
 * The directory (par) may be unresolved, in which case any returned child
 * will likely also be marked unresolved.  Likely but not guaranteed.  Since
 * the filesystem lookup requires a resolved directory vnode the caller is
 * responsible for resolving the namecache chain top-down.  This API
 * specifically allows whole chains to be created in an unresolved state.
 */
struct nchandle
cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct namecache *rep_ncp;	/* reuse a destroyed ncp */
	struct nchash_head *nchpp;
	struct mount *mp;
	u_int32_t hash;
	globaldata_t gd;
	int par_locked;
	int use_excl;

	gd = mycpu;
	mp = par_nch->mount;
	par_locked = 0;

	/*
	 * This is a good time to call it, no ncp's are locked by
	 * the caller or us.
	 */
	cache_hysteresis(1);

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	new_ncp = NULL;
	use_excl = 0;
	nchpp = NCHHASH(hash);
restart:
	rep_ncp = NULL;
	if (use_excl)
		spin_lock(&nchpp->spin);
	else
		spin_lock_shared(&nchpp->spin);

	/*
	 * Do a reverse scan to collect any DESTROYED ncps prior to matching
	 * an existing entry.
	 */
	TAILQ_FOREACH_REVERSE(ncp, &nchpp->list, nchash_list, nc_hash) {
		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 *
		 * We may be able to reuse DESTROYED entries that we come
		 * across, even if the name does not match, as long as
		 * nc_nlen is correct and the only hold ref is from the nchpp
		 * list itself.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen) {
			if (ncp->nc_flag & NCF_DESTROYED) {
				if (ncp->nc_refs == 1 && rep_ncp == NULL)
					rep_ncp = ncp;
				continue;
			}
			if (bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen))
				continue;

			/*
			 * Matched entry
			 */
			_cache_hold(ncp);
			if (rep_ncp)
				_cache_hold(rep_ncp);

			if (use_excl)
				spin_unlock(&nchpp->spin);
			else
				spin_unlock_shared(&nchpp->spin);

			if (par_locked) {
				_cache_unlock(par_nch->ncp);
				par_locked = 0;
			}

			/*
			 * Really try to destroy rep_ncp if encountered.
			 * Various edge cases can build up more than one,
			 * so loop if we succeed.  This isn't perfect, but
			 * we can't afford to have tons of entries build
			 * up on a single nchpp list due to rename-over
			 * operations.  If that were to happen, the system
			 * would bog down quickly.
			 */
			if (rep_ncp) {
				if (_cache_lock_nonblock(rep_ncp) == 0) {
					if (rep_ncp->nc_flag & NCF_DESTROYED) {
						if (cache_zap(rep_ncp)) {
							_cache_drop(ncp);
							goto restart;
						}
					} else {
						_cache_unlock(rep_ncp);
						_cache_drop(rep_ncp);
					}
				} else {
					_cache_drop(rep_ncp);
				}
			}

			/*
			 * Continue processing the matched entry
			 */
			if (_cache_lock_special(ncp) == 0) {
				/*
				 * Successfully locked but we must re-test
				 * conditions that might have changed since
				 * we did not have the lock before.
				 */
				if (ncp->nc_parent != par_nch->ncp ||
				    ncp->nc_nlen != nlc->nlc_namelen ||
				    bcmp(ncp->nc_name, nlc->nlc_nameptr,
					 ncp->nc_nlen) ||
				    (ncp->nc_flag & NCF_DESTROYED)) {
					_cache_put(ncp);
					goto restart;
				}
				_cache_auto_unresolve(mp, ncp);
				if (new_ncp) {
					_cache_free(new_ncp);
					new_ncp = NULL;	/* safety */
				}
				goto found;
			}
			_cache_get(ncp);	/* cycle the lock to block */
			_cache_put(ncp);
			_cache_drop(ncp);
			goto restart;
		}
	}

	/*
	 * We failed to locate the entry, try to resurrect a destroyed
	 * entry that we did find that is already correctly linked into
	 * nchpp and the parent.  We must re-test conditions after
	 * successfully locking rep_ncp.
	 *
	 * This case can occur under heavy loads due to not being able
	 * to safely lock the parent in cache_zap().  Nominally a repeated
	 * create/unlink load, but only the namelen needs to match.
	 *
	 * An exclusive lock on the nchpp is required to process this case,
	 * otherwise a race can cause duplicate entries to be created with
	 * one cpu reusing a DESTROYED ncp while another creates a new_ncp.
	 */
	if (rep_ncp && use_excl) {
		if (_cache_lock_nonblock(rep_ncp) == 0) {
			_cache_hold(rep_ncp);
			if (rep_ncp->nc_parent == par_nch->ncp &&
			    rep_ncp->nc_nlen == nlc->nlc_namelen &&
			    (rep_ncp->nc_flag & NCF_DESTROYED) &&
			    rep_ncp->nc_refs == 2)
			{
				/*
				 * Update nc_name.
				 */
				ncp = rep_ncp;

				_cache_ncp_gen_enter(ncp);

				bcopy(nlc->nlc_nameptr, ncp->nc_name,
				      nlc->nlc_namelen);

				/*
				 * This takes some care.  We must clear the
				 * NCF_DESTROYED flag before unlocking the
				 * hash chain so other concurrent searches
				 * do not skip this element.
				 *
				 * We must also unlock the hash chain before
				 * unresolving the ncp to avoid deadlocks.
				 * We hold the lock on the ncp so we can safely
				 * reinitialize nc_flag after that.
				 */
				ncp->nc_flag &= ~NCF_DESTROYED;
				spin_unlock(&nchpp->spin);	/* use_excl */

				_cache_setunresolved(ncp, 0);
				ncp->nc_flag = NCF_UNRESOLVED;
				ncp->nc_error = ENOTCONN;

				_cache_ncp_gen_exit(ncp);

				if (par_locked) {
					_cache_unlock(par_nch->ncp);
					par_locked = 0;
				}
				if (new_ncp) {
					_cache_free(new_ncp);
					new_ncp = NULL;	/* safety */
				}
				goto found;
			}
			_cache_put(rep_ncp);
		}
	}

	/*
	 * Otherwise create a new entry and add it to the cache.  The parent
	 * ncp must also be locked so we can link into it.
	 *
	 * We have to relookup after possibly blocking in kmalloc or
	 * when locking par_nch.
	 *
	 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
	 *	 mount case, in which case nc_name will be NULL.
	 *
	 * NOTE: In the rep_ncp != NULL case we are trying to reuse
	 *	 a DESTROYED entry, but didn't have an exclusive lock.
	 *	 In this situation we do not create a new_ncp.
	 */
	if (new_ncp == NULL) {
		if (use_excl)
			spin_unlock(&nchpp->spin);
		else
			spin_unlock_shared(&nchpp->spin);
		if (rep_ncp == NULL) {
			new_ncp = cache_alloc(nlc->nlc_namelen);
			if (nlc->nlc_namelen) {
				bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
				      nlc->nlc_namelen);
				new_ncp->nc_name[nlc->nlc_namelen] = 0;
			}
		}
		use_excl = 1;
		goto restart;
	}

	/*
	 * NOTE! The spinlock is held exclusively here because new_ncp
	 *	 is non-NULL.
	 */
	if (par_locked == 0) {
		spin_unlock(&nchpp->spin);
		_cache_lock(par_nch->ncp);
		par_locked = 1;
		goto restart;
	}

	/*
	 * Link to parent (requires another ref, the one already in new_ncp
	 * is what we will return).
	 *
	 * WARNING! We still hold the spinlock.  We have to set the hash
	 *	    table entry atomically.
	 */
	ncp = new_ncp;
	_cache_hold(ncp);
	_cache_link_parent(ncp, par_nch->ncp, nchpp);
	spin_unlock(&nchpp->spin);
	_cache_unlock(par_nch->ncp);
	/* par_locked = 0 - not used */
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	nch.mount = mp;
	nch.ncp = ncp;
	_cache_mntref(nch.mount);

	return(nch);
}
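
/*
 * Illustrative sketch (hypothetical caller): minimal use of the lookup
 * API from code holding a referenced, unlocked directory nchandle.
 * Only cache_nlookup()/cache_resolve()/cache_put() are real APIs here;
 * the component values and cred are assumed.
 *
 *	struct nlcomponent nlc;
 *	struct nchandle nch;
 *	u_int dummy_gen = 0;
 *
 *	nlc.nlc_nameptr = "example";
 *	nlc.nlc_namelen = 7;
 *	nch = cache_nlookup(&par_nch, &nlc);	// returns locked + refd
 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		cache_resolve(&nch, &dummy_gen, cred);	// dive the VFS
 *	// ... positive hit: nch.ncp->nc_vp != NULL, negative: == NULL
 *	cache_put(&nch);			// unlock + drop when done
 */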
/*
 * Attempt to lookup a namecache entry and return with a shared namecache
 * lock.  This operates non-blocking.  EWOULDBLOCK is returned if excl is
 * set or we are unable to lock.
 */
int
cache_nlookup_maybe_shared(struct nchandle *par_nch,
			   struct nlcomponent *nlc,
			   int excl, struct nchandle *res_nch)
{
	struct namecache *ncp;
	struct nchash_head *nchpp;
	struct mount *mp;
	u_int32_t hash;
	globaldata_t gd;

	/*
	 * If exclusive requested or shared namecache locks are disabled,
	 * return failure.
	 */
	if (ncp_shared_lock_disable || excl)
		return(EWOULDBLOCK);

	gd = mycpu;
	mp = par_nch->mount;

	/*
	 * This is a good time to call it, no ncp's are locked by
	 * the caller or us.
	 */
	cache_hysteresis(1);

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	nchpp = NCHHASH(hash);

	spin_lock_shared(&nchpp->spin);

	TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) {
		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			_cache_hold(ncp);
			spin_unlock_shared(&nchpp->spin);

			if (_cache_lock_shared_special(ncp) == 0) {
				if (ncp->nc_parent == par_nch->ncp &&
				    ncp->nc_nlen == nlc->nlc_namelen &&
				    bcmp(ncp->nc_name, nlc->nlc_nameptr,
					 ncp->nc_nlen) == 0 &&
				    (ncp->nc_flag & NCF_DESTROYED) == 0 &&
				    (ncp->nc_flag & NCF_UNRESOLVED) == 0 &&
				    _cache_auto_unresolve_test(mp, ncp) == 0)
				{
					goto found;
				}
				_cache_unlock(ncp);
			}
			_cache_drop(ncp);
			return(EWOULDBLOCK);
		}
	}

	/*
	 * Failure
	 */
	spin_unlock_shared(&nchpp->spin);
	return(EWOULDBLOCK);

	/*
	 * Success
	 *
	 * Note that nc_error might be non-zero (e.g. ENOENT).
	 */
found:
	res_nch->mount = mp;
	res_nch->ncp = ncp;
	++gd->gd_nchstats->ncs_goodhits;
	_cache_mntref(res_nch->mount);

	KKASSERT(ncp->nc_error != EWOULDBLOCK);
	return(ncp->nc_error);
}
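
/*
 * Illustrative sketch (hypothetical caller): the intended fallback
 * pattern for the shared-lock fast path.  Callers retry with the normal
 * blocking API on EWOULDBLOCK.
 *
 *	error = cache_nlookup_maybe_shared(&par_nch, &nlc, 0, &nch);
 *	if (error == EWOULDBLOCK) {
 *		nch = cache_nlookup(&par_nch, &nlc);	// blocking fallback
 *		error = 0;
 *	}
 *	// nch is now locked (shared or exclusive) and referenced
 */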
/*
 * This is a non-blocking version of cache_nlookup() used by
 * nfs_readdirplusrpc_uio().  It can fail for any reason and
 * will return nch.ncp == NULL in that case.
 */
struct nchandle
cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;
	struct namecache *ncp;
	struct namecache *new_ncp;
	struct nchash_head *nchpp;
	struct mount *mp;
	u_int32_t hash;
	globaldata_t gd;
	int par_locked;

	gd = mycpu;
	mp = par_nch->mount;
	par_locked = 0;

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	new_ncp = NULL;
	nchpp = NCHHASH(hash);
restart:
	spin_lock(&nchpp->spin);
	TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) {
		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (par_locked) {
				_cache_unlock(par_nch->ncp);
				par_locked = 0;
			}
			if (_cache_lock_special(ncp) == 0) {
				if (ncp->nc_parent != par_nch->ncp ||
				    ncp->nc_nlen != nlc->nlc_namelen ||
				    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) ||
				    (ncp->nc_flag & NCF_DESTROYED)) {
					kprintf("cache_lookup_nonblock: "
						"ncp-race %p %*.*s\n",
						ncp,
						nlc->nlc_namelen,
						nlc->nlc_namelen,
						nlc->nlc_nameptr);
					_cache_put(ncp);
					goto restart;
				}
				_cache_auto_unresolve(mp, ncp);
				if (new_ncp) {
					_cache_free(new_ncp);
					new_ncp = NULL;
				}
				goto found;
			}
			_cache_drop(ncp);
			goto failed;
		}
	}

	/*
	 * We failed to locate an entry, create a new entry and add it to
	 * the cache.  The parent ncp must also be locked so we
	 * can link into it.
	 *
	 * We have to relookup after possibly blocking in kmalloc or
	 * when locking par_nch.
	 *
	 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
	 *	 mount case, in which case nc_name will be NULL.
	 */
	if (new_ncp == NULL) {
		spin_unlock(&nchpp->spin);
		new_ncp = cache_alloc(nlc->nlc_namelen);
		if (nlc->nlc_namelen) {
			bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
			      nlc->nlc_namelen);
			new_ncp->nc_name[nlc->nlc_namelen] = 0;
		}
		goto restart;
	}
	if (par_locked == 0) {
		spin_unlock(&nchpp->spin);
		if (_cache_lock_nonblock(par_nch->ncp) == 0) {
			par_locked = 1;
			goto restart;
		}
		goto failed;
	}

	/*
	 * Link to parent (requires another ref, the one already in new_ncp
	 * is what we will return).
	 *
	 * WARNING! We still hold the spinlock.  We have to set the hash
	 *	    table entry atomically.
	 */
	ncp = new_ncp;
	_cache_hold(ncp);
	_cache_link_parent(ncp, par_nch->ncp, nchpp);
	spin_unlock(&nchpp->spin);
	_cache_unlock(par_nch->ncp);
	/* par_locked = 0 - not used */
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	nch.mount = mp;
	nch.ncp = ncp;
	_cache_mntref(nch.mount);

	return(nch);
failed:
	if (new_ncp) {
		_cache_free(new_ncp);
		new_ncp = NULL;
	}
	nch.mount = NULL;
	nch.ncp = NULL;
	return(nch);
}
/*
 * This is a non-locking optimized lookup that depends on adding a ref
 * to prevent normal eviction.  nch.ncp can be returned as NULL for any
 * reason and the caller will retry with normal locking in that case.
 *
 * This function only returns resolved entries so callers do not accidentally
 * race doing out of order / unfenced field checks.
 *
 * The caller must validate the result for parent-to-child continuity.
 */
struct nchandle
cache_nlookup_nonlocked(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;
	struct namecache *ncp;
	struct nchash_head *nchpp;
	struct mount *mp;
	u_int32_t hash;
	globaldata_t gd;

	gd = mycpu;
	mp = par_nch->mount;

	/*
	 * Try to locate an existing entry
	 */
	hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
	hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
	nchpp = NCHHASH(hash);

	spin_lock_shared(&nchpp->spin);
	TAILQ_FOREACH(ncp, &nchpp->list, nc_hash) {
		/*
		 * Break out if we find a matching entry.  Note that
		 * UNRESOLVED entries may match, but DESTROYED entries
		 * do not.  However, UNRESOLVED entries still return failure.
		 */
		if (ncp->nc_parent == par_nch->ncp &&
		    ncp->nc_nlen == nlc->nlc_namelen &&
		    bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
		    (ncp->nc_flag & NCF_DESTROYED) == 0
		) {
			/*
			 * Test NFS timeout for auto-unresolve.  Give up if
			 * the entry is not resolved.
			 *
			 * Getting the ref with the nchpp locked prevents
			 * any transition to NCF_DESTROYED.
			 */
			if (_cache_auto_unresolve_test(par_nch->mount, ncp))
				break;
			if (ncp->nc_flag & NCF_UNRESOLVED)
				break;
			_cache_hold(ncp);
			spin_unlock_shared(&nchpp->spin);

			/*
			 * We need an additional test to ensure that the ref
			 * we got above prevents transitions to NCF_UNRESOLVED.
			 * This can occur if another thread is currently
			 * holding the ncp exclusively locked or (if we raced
			 * that and it unlocked before our test) the flag
			 * has been set.
			 *
			 * XXX check if superseded by nc_generation XXX
			 */
			if (_cache_lockstatus(ncp) < 0 ||
			    (ncp->nc_flag & (NCF_DESTROYED | NCF_UNRESOLVED)))
			{
				if ((ncvp_debug & 4) &&
				    (ncp->nc_flag &
				     (NCF_DESTROYED | NCF_UNRESOLVED)))
				{
					kprintf("ncp state change: %p %08x %d %s\n",
						ncp, ncp->nc_flag,
						ncp->nc_error, ncp->nc_name);
				}
				_cache_drop(ncp);
				spin_lock_shared(&nchpp->spin);
				break;
			}

			/*
			 * Return the ncp bundled into a nch on success.
			 * The ref should passively prevent the ncp from
			 * becoming unresolved without having to hold a lock.
			 * (XXX this may not be entirely true)
			 */
			goto found;
		}
	}
	spin_unlock_shared(&nchpp->spin);
	nch.mount = NULL;
	nch.ncp = NULL;
	return(nch);
found:
	/*
	 * stats and namecache size management
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		++gd->gd_nchstats->ncs_miss;
	else if (ncp->nc_vp)
		++gd->gd_nchstats->ncs_goodhits;
	else
		++gd->gd_nchstats->ncs_neghits;
	nch.mount = mp;
	nch.ncp = ncp;
	_cache_mntref(nch.mount);

	return(nch);
}
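
/*
 * Illustrative sketch (hypothetical caller): because no lock is held,
 * callers of the nonlocked lookup must validate continuity themselves
 * and fall back to the locked path when anything looks stale.  This is
 * one plausible shape of such a caller; the real consumers (e.g. the
 * nlookup() path) perform additional generation checks.
 *
 *	nch = cache_nlookup_nonlocked(&par_nch, &nlc);
 *	if (nch.ncp == NULL ||
 *	    nch.ncp->nc_parent != par_nch.ncp) {	// continuity check
 *		if (nch.ncp)
 *			cache_drop(&nch);
 *		nch = cache_nlookup(&par_nch, &nlc);	// locked fallback
 *		cache_unlock(&nch);
 *	}
 */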
/*
 * The namecache entry is marked as being used as a mount point.
 * Locate the mount if it is visible to the caller.  The DragonFly
 * mount system allows arbitrary loops in the topology and disentangles
 * those loops by matching against (mp, ncp) rather than just (ncp).
 * This means any given ncp can dive any number of mounts, depending
 * on the relative mount (e.g. nullfs) the caller is at in the topology.
 *
 * We use a very simple frontend cache to reduce SMP conflicts,
 * which we have to do because the mountlist scan needs an exclusive
 * lock around its ripout info list.  Not to mention that there might
 * be a lot of mounts.
 *
 * Because all mounts can potentially be accessed by all cpus, break the cpu's
 * down a bit to allow some contention rather than making the cache
 * entirely per-cpu.
 *
 * The hash table is split into per-cpu areas and is 4-way set-associative.
 */
struct findmount_info {
	struct mount *result;
	struct mount *nch_mount;
	struct namecache *nch_ncp;
};

static __inline
struct ncmount_cache *
ncmount_cache_lookup4(struct mount *mp, struct namecache *ncp)
{
	uint32_t hash;

	hash = iscsi_crc32(&mp, sizeof(mp));
	hash = iscsi_crc32_ext(&ncp, sizeof(ncp), hash);
	hash = hash & ((NCMOUNT_NUMCACHE - 1) & ~(NCMOUNT_SET - 1));

	return (&ncmount_cache[hash]);
}
static
struct ncmount_cache *
ncmount_cache_lookup(struct mount *mp, struct namecache *ncp)
{
	struct ncmount_cache *ncc;
	struct ncmount_cache *best;
	int delta;
	int best_delta;
	int i;

	ncc = ncmount_cache_lookup4(mp, ncp);

	/*
	 * NOTE: When checking for a ticks overflow implement a slop of
	 *	 2 ticks just to be safe, because ticks is accessed
	 *	 non-atomically one CPU can increment it while another
	 *	 is still using the old value.
	 */
	if (ncc->ncp == ncp && ncc->mp == mp)		/* 0 */
		return ncc;
	delta = (int)(ticks - ncc->ticks);		/* beware GCC opts */
	if (delta < -2)					/* overflow reset */
		ncc->ticks = ticks;
	best = ncc;
	best_delta = delta;

	for (i = 1; i < NCMOUNT_SET; ++i) {		/* 1, 2, 3 */
		++ncc;
		if (ncc->ncp == ncp && ncc->mp == mp)
			return ncc;
		delta = (int)(ticks - ncc->ticks);
		if (delta < -2)
			ncc->ticks = ticks;
		if (delta > best_delta) {
			best_delta = delta;
			best = ncc;
		}
	}
	return best;
}
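
/*
 * Index math note: NCMOUNT_NUMCACHE and NCMOUNT_SET are powers of 2, so
 * the mask in ncmount_cache_lookup4() selects a set-aligned base index.
 * A sketch with hypothetical values (not the actual tunables):
 *
 *	NCMOUNT_SET = 4, NCMOUNT_NUMCACHE = 1024
 *	hash & ((1024 - 1) & ~(4 - 1))  ->  hash & 0x3FC
 *
 * i.e. the low two bits are cleared, leaving 256 possible set bases,
 * and the associative scan above probes the 4 consecutive entries of
 * the selected set, returning the least-recently-touched entry (largest
 * ticks delta) for replacement on a miss.
 */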
/*
 * pcpu-optimized mount search.  Locate the recursive mountpoint, avoid
 * doing an expensive mountlist_scan*() if possible.
 *
 * (mp, ncp) -> mountonpt.k
 *
 * Returns a referenced mount pointer or NULL
 *
 * General SMP operation uses a per-cpu umount_spin to interlock unmount
 * operations (that is, where the mp_target can be freed out from under us).
 *
 * Lookups use the ncc->updating counter to validate the contents in order
 * to avoid having to obtain the per cache-element spin-lock.  In addition,
 * the ticks field is only updated when it changes.  However, if our per-cpu
 * lock fails due to an unmount-in-progress, we fall-back to the
 * cache-element's spin-lock.
 */
struct mount *
cache_findmount(struct nchandle *nch)
{
	struct findmount_info info;
	struct ncmount_cache *ncc;
	struct ncmount_cache ncc_copy;
	struct mount *target;
	struct pcpu_ncache *pcpu;
	struct spinlock *spinlk;
	int update;

	pcpu = pcpu_ncache;
	if (ncmount_cache_enable == 0 || pcpu == NULL) {
		ncc = NULL;
		goto skip;
	}
	pcpu += mycpu->gd_cpuid;

again:
	ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
	if (ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
found:
		/*
		 * This is a bit messy for now because we do not yet have
		 * safe disposal of mount structures.  We have to ref
		 * ncc->mp_target but the 'update' counter only tell us
		 * whether the cache has changed after the fact.
		 *
		 * For now get a per-cpu spinlock that will only contend
		 * against umount's.  This is the best path.  If it fails,
		 * instead of waiting on the umount we fall-back to a
		 * shared ncc->spin lock, which will generally only cost a
		 * cache ping-pong.
		 */
		update = ncc->updating;
		if (__predict_true(spin_trylock(&pcpu->umount_spin))) {
			spinlk = &pcpu->umount_spin;
		} else {
			spinlk = &ncc->spin;
			spin_lock_shared(spinlk);
		}
		if (update & 1) {		/* update in progress */
			spin_unlock_any(spinlk);
			goto skip;
		}
		ncc_copy = *ncc;
		cpu_lfence();
		if (ncc->updating != update) {	/* content changed */
			spin_unlock_any(spinlk);
			goto again;
		}
		if (ncc_copy.ncp != nch->ncp || ncc_copy.mp != nch->mount) {
			spin_unlock_any(spinlk);
			goto again;
		}
		if (ncc_copy.isneg == 0) {
			target = ncc_copy.mp_target;
			if (target->mnt_ncmounton.mount == nch->mount &&
			    target->mnt_ncmounton.ncp == nch->ncp)
			{
				/*
				 * Cache hit (positive) (avoid dirtying
				 * the cache line if possible)
				 */
				if (ncc->ticks != (int)ticks)
					ncc->ticks = (int)ticks;
				_cache_mntref(target);
				spin_unlock_any(spinlk);
				return(target);
			}
		} else {
			/*
			 * Cache hit (negative) (avoid dirtying
			 * the cache line if possible)
			 */
			if (ncc->ticks != (int)ticks)
				ncc->ticks = (int)ticks;
			spin_unlock_any(spinlk);
			return(NULL);
		}
		spin_unlock_any(spinlk);
	}
skip:
	/*
	 * Slow path
	 */
	info.result = NULL;
	info.nch_mount = nch->mount;
	info.nch_ncp = nch->ncp;
	mountlist_scan(cache_findmount_callback, &info,
		       MNTSCAN_FORWARD | MNTSCAN_NOBUSY | MNTSCAN_NOUNLOCK);

	/*
	 * To reduce multi-re-entry on the cache, relookup in the cache.
	 * This can still race, obviously, but that's ok.
	 */
	if (ncc == NULL)
		return(info.result);
	ncc = ncmount_cache_lookup(nch->mount, nch->ncp);
	if (ncc->ncp == nch->ncp && ncc->mp == nch->mount) {
		if (info.result)
			atomic_add_int(&info.result->mnt_refs, -1);
		goto found;
	}

	/*
	 * Cache the result.
	 */
	if ((info.result == NULL ||
	    (info.result->mnt_kern_flag & MNTK_UNMOUNT) == 0)) {
		spin_lock(&ncc->spin);
		atomic_add_int_nonlocked(&ncc->updating, 1);
		cpu_sfence();
		KKASSERT(ncc->updating & 1);

		if (ncc->mp != nch->mount) {
			if (ncc->mp)
				atomic_add_int(&ncc->mp->mnt_refs, -1);
			atomic_add_int(&nch->mount->mnt_refs, 1);
			ncc->mp = nch->mount;
		}
		ncc->ncp = nch->ncp;	/* ptr compares only, not refd*/
		ncc->ticks = (int)ticks;

		if (info.result) {
			ncc->isneg = 0;
			if (ncc->mp_target != info.result) {
				if (ncc->mp_target) {
					atomic_add_int(
					    &ncc->mp_target->mnt_refs, -1);
				}
				ncc->mp_target = info.result;
				atomic_add_int(&info.result->mnt_refs, 1);
			}
		} else {
			ncc->isneg = 1;
			if (ncc->mp_target) {
				atomic_add_int(&ncc->mp_target->mnt_refs, -1);
				ncc->mp_target = NULL;
			}
		}
		cpu_sfence();
		atomic_add_int_nonlocked(&ncc->updating, 1);
		spin_unlock(&ncc->spin);
	}
	return(info.result);
}
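
/*
 * Illustrative sketch (hypothetical caller): a traversal that dives a
 * mount point pairs cache_findmount() with cache_dropmount() (below)
 * once the covering mount is no longer needed.
 *
 *	struct mount *mp;
 *
 *	if ((mp = cache_findmount(&nch)) != NULL) {
 *		// ... cross into mp->mnt_ncmountpt ...
 *		cache_dropmount(mp);	// release the ref
 *	}
 */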
static
int
cache_findmount_callback(struct mount *mp, void *data)
{
	struct findmount_info *info = data;

	/*
	 * Check the mount's mounted-on point against the passed nch.
	 */
	if (mp->mnt_ncmounton.mount == info->nch_mount &&
	    mp->mnt_ncmounton.ncp == info->nch_ncp
	) {
		info->result = mp;
		_cache_mntref(mp);
		return(-1);
	}
	return(0);
}
void
cache_dropmount(struct mount *mp)
{
	atomic_add_int(&mp->mnt_refs, -1);
}
/*
 * mp is being mounted, scrap entries matching mp->mnt_ncmounton (positive
 * or negative).
 *
 * A full scan is not required, but for now just do it anyway.
 */
void
cache_ismounting(struct mount *mp)
{
	struct ncmount_cache *ncc;
	struct mount *ncc_mp;
	int i;

	if (pcpu_ncache == NULL)
		return;

	for (i = 0; i < NCMOUNT_NUMCACHE; ++i) {
		ncc = &ncmount_cache[i];
		if (ncc->mp != mp->mnt_ncmounton.mount ||
		    ncc->ncp != mp->mnt_ncmounton.ncp) {
			continue;
		}
		spin_lock(&ncc->spin);
		atomic_add_int_nonlocked(&ncc->updating, 1);
		cpu_sfence();
		KKASSERT(ncc->updating & 1);
		if (ncc->mp != mp->mnt_ncmounton.mount ||
		    ncc->ncp != mp->mnt_ncmounton.ncp) {
			cpu_sfence();
			atomic_add_int_nonlocked(&ncc->updating, 1);
			spin_unlock(&ncc->spin);
			continue;
		}
		ncc_mp = ncc->mp;
		ncc->ncp = NULL;
		ncc->mp = NULL;
		if (ncc_mp)
			atomic_add_int(&ncc_mp->mnt_refs, -1);
		ncc_mp = ncc->mp_target;
		ncc->mp_target = NULL;
		if (ncc_mp)
			atomic_add_int(&ncc_mp->mnt_refs, -1);
		ncc->ticks = (int)ticks - hz * 120;

		cpu_sfence();
		atomic_add_int_nonlocked(&ncc->updating, 1);
		spin_unlock(&ncc->spin);
	}

	/*
	 * Pre-cache the mount point
	 */
	ncc = ncmount_cache_lookup(mp->mnt_ncmounton.mount,
				   mp->mnt_ncmounton.ncp);

	spin_lock(&ncc->spin);
	atomic_add_int_nonlocked(&ncc->updating, 1);
	cpu_sfence();
	KKASSERT(ncc->updating & 1);

	if (ncc->mp)
		atomic_add_int(&ncc->mp->mnt_refs, -1);
	atomic_add_int(&mp->mnt_ncmounton.mount->mnt_refs, 1);
	ncc->mp = mp->mnt_ncmounton.mount;
	ncc->ncp = mp->mnt_ncmounton.ncp;	/* ptr compares only */
	ncc->ticks = (int)ticks;

	ncc->isneg = 0;
	if (ncc->mp_target != mp) {
		if (ncc->mp_target)
			atomic_add_int(&ncc->mp_target->mnt_refs, -1);
		ncc->mp_target = mp;
		atomic_add_int(&mp->mnt_refs, 1);
	}
	cpu_sfence();
	atomic_add_int_nonlocked(&ncc->updating, 1);
	spin_unlock(&ncc->spin);
}
/*
 * Scrap any ncmount_cache entries related to mp.  Not only do we need to
 * scrap entries matching mp->mnt_ncmounton, but we also need to scrap any
 * negative hits involving (mp, <any>).
 *
 * A full scan is required.
 */
void
cache_unmounting(struct mount *mp)
{
	struct ncmount_cache *ncc;
	struct pcpu_ncache *pcpu;
	struct mount *ncc_mp;
	int i;

	pcpu = pcpu_ncache;
	if (pcpu == NULL)
		return;

	for (i = 0; i < ncpus; ++i)
		spin_lock(&pcpu[i].umount_spin);

	for (i = 0; i < NCMOUNT_NUMCACHE; ++i) {
		ncc = &ncmount_cache[i];
		if (ncc->mp != mp && ncc->mp_target != mp)
			continue;
		spin_lock(&ncc->spin);
		atomic_add_int_nonlocked(&ncc->updating, 1);
		cpu_sfence();

		if (ncc->mp != mp && ncc->mp_target != mp) {
			atomic_add_int_nonlocked(&ncc->updating, 1);
			cpu_sfence();
			spin_unlock(&ncc->spin);
			continue;
		}
		ncc_mp = ncc->mp;
		ncc->ncp = NULL;
		ncc->mp = NULL;
		if (ncc_mp)
			atomic_add_int(&ncc_mp->mnt_refs, -1);
		ncc_mp = ncc->mp_target;
		ncc->mp_target = NULL;
		if (ncc_mp)
			atomic_add_int(&ncc_mp->mnt_refs, -1);
		ncc->ticks = (int)ticks - hz * 120;

		cpu_sfence();
		atomic_add_int_nonlocked(&ncc->updating, 1);
		spin_unlock(&ncc->spin);
	}

	for (i = 0; i < ncpus; ++i)
		spin_unlock(&pcpu[i].umount_spin);
}
/*
 * Resolve an unresolved namecache entry, generally by looking it up.
 * The passed ncp must be locked and refd.
 *
 * Theoretically since a vnode cannot be recycled while held, and since
 * the nc_parent chain holds its vnode as long as children exist, the
 * direct parent of the cache entry we are trying to resolve should
 * have a valid vnode.  If not then generate an error that we can
 * determine is related to a resolver bug.
 *
 * However, if a vnode was in the middle of a recyclement when the NCP
 * got locked, ncp->nc_vp might point to a vnode that is about to become
 * invalid.  cache_resolve() handles this case by unresolving the entry
 * and then re-resolving it.
 *
 * Note that successful resolution does not necessarily return an error
 * code of 0.  If the ncp resolves to a negative cache hit then ENOENT
 * is returned.
 *
 * (*genp) is adjusted based on our resolution operation.  If it is already
 * wrong, that's ok... it will still be wrong on return.
 */
int
cache_resolve(struct nchandle *nch, u_int *genp, struct ucred *cred)
{
	struct namecache *par_tmp;
	struct namecache *par;
	struct namecache *ncp;
	struct nchandle nctmp;
	struct mount *mp;
	struct vnode *dvp;
	int error;

	ncp = nch->ncp;
	mp = nch->mount;
	KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);

restart:
	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED)) {
			_cache_ncp_gen_enter(ncp);
			_cache_setunresolved(ncp, 0);
			if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
				_cache_ncp_gen_exit(ncp);
				return (ncp->nc_error);
			}
		} else if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
			return (ncp->nc_error);
		} else {
			_cache_ncp_gen_enter(ncp);
		}
	} else {
		_cache_ncp_gen_enter(ncp);
	}
	/* in gen_enter state */

	/*
	 * If the ncp was destroyed it will never resolve again.  This
	 * can basically only happen when someone is chdir'd into an
	 * empty directory which is then rmdir'd.  We want to catch this
	 * here and not dive the VFS because the VFS might actually
	 * have a way to re-resolve the disconnected ncp, which will
	 * result in inconsistencies in the cdir/nch for proc->p_fd.
	 */
	if (ncp->nc_flag & NCF_DESTROYED) {
		_cache_ncp_gen_exit(ncp);
		return(EINVAL);
	}

	/*
	 * Mount points need special handling because the parent does not
	 * belong to the same filesystem as the ncp.
	 */
	if (ncp == mp->mnt_ncmountpt.ncp) {
		error = cache_resolve_mp(mp, 0);
		_cache_ncp_gen_exit(ncp);
		return (error);
	}

	/*
	 * We expect an unbroken chain of ncps to at least the mount point,
	 * and even all the way to root (but this code doesn't have to go
	 * past the mount point).
	 */
	if (ncp->nc_parent == NULL) {
		kprintf("EXDEV case 1 %p %*.*s\n", ncp,
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		ncp->nc_error = EXDEV;
		_cache_ncp_gen_exit(ncp);
		return(ncp->nc_error);
	}

	/*
	 * The vp's of the parent directories in the chain are held via vhold()
	 * due to the existence of the child, and should not disappear.
	 * However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace and
	 *	  destroys the namespace for entire directories quite often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur all that often, or if it does not have to go back too
	 * many nodes to resolve the ncp.
	 */
	while ((dvp = cache_dvpref(ncp)) == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED) {
			if (ncvp_debug & 8) {
				kprintf("nc_parent destroyed: %s/%s\n",
					ncp->nc_parent->nc_name, ncp->nc_name);
			}
			_cache_ncp_gen_exit(ncp);
			return(ENOENT);
		}
		par = ncp->nc_parent;
		_cache_hold(par);
		_cache_lock(par);
		while ((par_tmp = par->nc_parent) != NULL &&
		       par_tmp->nc_vp == NULL) {
			_cache_hold(par_tmp);
			_cache_lock(par_tmp);
			_cache_put(par);
			par = par_tmp;
		}
		if (par->nc_parent == NULL) {
			kprintf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			_cache_ncp_gen_exit(ncp);
			return (EXDEV);
		}

		/*
		 * The parent is not set in stone, ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
		 */
		_cache_get(par);	/* additional hold/lock */
		_cache_put(par);	/* from earlier hold/lock */
		if (par == nch->mount->mnt_ncmountpt.ncp) {
			cache_resolve_mp(nch->mount, 0);
		} else if ((dvp = cache_dvpref(par)) == NULL) {
			kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			continue;
		} else {
			if (par->nc_flag & NCF_UNRESOLVED) {
				nctmp.mount = mp;
				nctmp.ncp = par;
				par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
			}
			vrele(dvp);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				kprintf("EXDEV case 3 %*.*s error %d\n",
					par->nc_nlen, par->nc_nlen, par->nc_name,
					par->nc_error);
				_cache_put(par);
				_cache_ncp_gen_exit(ncp);
				return(error);
			}
			kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
				par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		_cache_put(par);
		/* loop */
	}

	/*
	 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
	 * ncp's and reattach them.  If this occurs the original ncp is marked
	 * EAGAIN to force a relookup.
	 *
	 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
	 *	 ncp must already be resolved.
	 */
	if (dvp) {
		nctmp.mount = mp;
		nctmp.ncp = ncp;
		*genp += 4;	/* setvp bumps the generation */
		ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
		vrele(dvp);
	} else {
		ncp->nc_error = EPERM;
	}

	if (ncp->nc_error == EAGAIN) {
		kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
			ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
		_cache_ncp_gen_exit(ncp);
		goto restart;
	}
	_cache_ncp_gen_exit(ncp);

	return(ncp->nc_error);
}
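
/*
 * Illustrative sketch (hypothetical caller): resolving after a lookup.
 * The generation pointer may be a throwaway when the caller does not
 * track generation numbers.
 *
 *	u_int dummy_gen = 0;
 *	int error = 0;
 *
 *	if (nch.ncp->nc_flag & NCF_UNRESOLVED)
 *		error = cache_resolve(&nch, &dummy_gen, cred);
 *	// error == ENOENT here can simply mean a negative cache hit
 */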
/*
 * Resolve the ncp associated with a mount point.  Such ncp's almost always
 * remain resolved and this routine is rarely called.  NFS MPs tend to force
 * re-resolution more often due to NFS's mac-truck-smash-the-namecache
 * method of tracking namespace changes.
 *
 * The semantics for this call is that the passed ncp must be locked on
 * entry and will be locked on return.  However, if we actually have to
 * resolve the mount point we temporarily unlock the entry in order to
 * avoid race-to-root deadlocks due to e.g. dead NFS mounts.  Because of
 * the unlock we have to recheck the flags after we relock.
 */
static int
cache_resolve_mp(struct mount *mp, int adjgen)
{
	struct namecache *ncp = mp->mnt_ncmountpt.ncp;
	struct vnode *vp;
	int error;

	KKASSERT(mp != NULL);

	/*
	 * If the ncp is already resolved we have nothing to do.  However,
	 * we do want to guarantee that a usable vnode is returned when
	 * a vnode is present, so make sure it hasn't been reclaimed.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
			_cache_setunresolved(ncp, adjgen);
	}

	if (ncp->nc_flag & NCF_UNRESOLVED) {
		/*
		 * ncp must be unlocked across the vfs_busy(), but
		 * once busied lock ordering is ncp(s), then vnodes,
		 * so we must relock the ncp before issuing the VFS_ROOT().
		 */
		_cache_unlock(ncp);
		while (vfs_busy(mp, 0))
			;
		_cache_lock(ncp);
		error = VFS_ROOT(mp, &vp);

		/*
		 * recheck the ncp state after relocking.
		 */
		if (ncp->nc_flag & NCF_UNRESOLVED) {
			ncp->nc_error = error;
			if (error == 0) {
				_cache_setvp(mp, ncp, vp, adjgen);
				vput(vp);
			} else {
				kprintf("[diagnostic] cache_resolve_mp: failed"
					" to resolve mount %p err=%d ncp=%p\n",
					mp, error, ncp);
				_cache_setvp(mp, ncp, NULL, adjgen);
			}
		} else if (error == 0) {
			vput(vp);
		}
		vfs_unbusy(mp);
	}
	return(ncp->nc_error);
}

/*
 * Resolve the parent vnode
 */
int
cache_resolve_dvp(struct nchandle *nch, struct ucred *cred, struct vnode **dvpp)
{
	struct namecache *par_tmp;
	struct namecache *par;
	struct namecache *ncp;
	struct nchandle nctmp;
	struct mount *mp;
	struct vnode *dvp;
	int error;

	*dvpp = NULL;
	ncp = nch->ncp;
	mp = nch->mount;
	KKASSERT(_cache_lockstatus(ncp) == LK_EXCLUSIVE);

	/*
	 * Treat this as a mount point even if it has a parent (e.g.
	 * null-mount).  Return a NULL dvp and no error.
	 */
	if (ncp == mp->mnt_ncmountpt.ncp)
		return 0;

	/*
	 * If the ncp was destroyed there is no parent directory, return
	 * EINVAL.
	 */
	if (ncp->nc_flag & NCF_DESTROYED)
		return(EINVAL);

	/*
	 * No parent if at the root of a filesystem, no error.  Typically
	 * not applicable to null-mounts.  This case should have been caught
	 * in the above ncmountpt check.
	 */
	if (ncp->nc_parent == NULL)
		return 0;

	/*
	 * Resolve the parent dvp.
	 *
	 * The vp's of the parent directories in the chain are held via vhold()
	 * due to the existence of the child, and should not disappear.
	 * However, there are cases where they can disappear:
	 *
	 *	- due to filesystem I/O errors.
	 *	- due to NFS being stupid about tracking the namespace and
	 *	  destroying the namespace for entire directories quite often.
	 *	- due to forced unmounts.
	 *	- due to an rmdir (parent will be marked DESTROYED)
	 *
	 * When this occurs we have to track the chain backwards and resolve
	 * it, looping until the resolver catches up to the current node.  We
	 * could recurse here but we might run ourselves out of kernel stack
	 * so we do it in a more painful manner.  This situation really should
	 * not occur all that often, or if it does not have to go back too
	 * many nodes to resolve the ncp.
	 */
	while ((dvp = cache_dvpref(ncp)) == NULL) {
		/*
		 * This case can occur if a process is CD'd into a
		 * directory which is then rmdir'd.  If the parent is marked
		 * destroyed there is no point trying to resolve it.
		 */
		if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
			return(EINVAL);
		par = ncp->nc_parent;
		_cache_hold(par);
		_cache_lock(par);
		while ((par_tmp = par->nc_parent) != NULL &&
		       par_tmp->nc_vp == NULL) {
			_cache_hold(par_tmp);
			_cache_lock(par_tmp);
			_cache_put(par);
			par = par_tmp;
		}
		if (par->nc_parent == NULL) {
			kprintf("EXDEV case 2 %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			return (EXDEV);
		}

		/*
		 * The parent is not set in stone, ref and lock it to prevent
		 * it from disappearing.  Also note that due to renames it
		 * is possible for our ncp to move and for par to no longer
		 * be one of its parents.  We resolve it anyway, the loop
		 * will handle any moves.
		 */
		_cache_get(par);	/* additional hold/lock */
		_cache_put(par);	/* from earlier hold/lock */
		if (par == nch->mount->mnt_ncmountpt.ncp) {
			cache_resolve_mp(nch->mount, 1);
		} else if ((dvp = cache_dvpref(par)) == NULL) {
			kprintf("[diagnostic] cache_resolve: raced on %*.*s\n",
				par->nc_nlen, par->nc_nlen, par->nc_name);
			_cache_put(par);
			continue;
		} else {
			if (par->nc_flag & NCF_UNRESOLVED) {
				nctmp.mount = mp;
				nctmp.ncp = par;
				par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
			}
			vrele(dvp);
		}
		if ((error = par->nc_error) != 0) {
			if (par->nc_error != EAGAIN) {
				kprintf("EXDEV case 3 %*.*s error %d\n",
					par->nc_nlen, par->nc_nlen,
					par->nc_name, par->nc_error);
				_cache_put(par);
				return (error);
			}
			kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
				par, par->nc_nlen, par->nc_nlen, par->nc_name);
		}
		_cache_put(par);
		/* loop */
	}

	/*
	 * We have a referenced dvp
	 */
	*dvpp = dvp;
	return 0;
}
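
/*
 * Worked scenario for the backwards-resolution loop above (illustrative,
 * not from the original source): for /a/b/c where b's vnode disappeared
 * (e.g. an NFS server-side removal), cache_dvpref(c) fails, so the loop
 * walks up to the deepest ancestor whose parent still has a vnode (b,
 * under a), re-resolves it against that parent's dvp via VOP_NRESOLVE(),
 * and then retries until cache_dvpref(c) succeeds or a hard error such
 * as EXDEV is hit.
 */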

/*
 * Clean out negative cache entries when too many have accumulated.
 */
static void
_cache_cleanneg(long count)
{
	struct pcpu_ncache *pn;
	struct namecache *ncp;
	static uint32_t neg_rover;
	uint32_t n;
	long vnegs;

	n = neg_rover++;	/* SMP heuristical, race ok */
	cpu_ccfence();
	n = n % (uint32_t)ncpus;

	/*
	 * Normalize vfscache_negs and count.  count is sometimes based
	 * on vfscache_negs.  vfscache_negs is heuristical and can sometimes
	 * have crazy values.
	 */
	vnegs = vfscache_negs;
	cpu_ccfence();
	if (vnegs <= MINNEG)
		vnegs = MINNEG;

	pn = &pcpu_ncache[n];
	spin_lock(&pn->neg_spin);
	count = pn->neg_count * count / vnegs + 1;
	spin_unlock(&pn->neg_spin);

	/*
	 * Attempt to clean out the specified number of negative cache
	 * entries.
	 */
	while (count > 0) {
		spin_lock(&pn->neg_spin);
		ncp = TAILQ_FIRST(&pn->neg_list);
		if (ncp == NULL) {
			spin_unlock(&pn->neg_spin);
			break;
		}
		TAILQ_REMOVE(&pn->neg_list, ncp, nc_vnode);
		TAILQ_INSERT_TAIL(&pn->neg_list, ncp, nc_vnode);
		_cache_hold(ncp);
		spin_unlock(&pn->neg_spin);

		/*
		 * This can race, so we must re-check that the ncp
		 * is on the neg list after successfully locking it.
		 *
		 * Don't scrap actively referenced ncps.  There should be
		 * 3 refs: the natural ref, one from being on the neg list,
		 * and one from our hold above.
		 *
		 * Recheck fields after successfully locking to ensure
		 * that it is in-fact still on the negative list with no
		 * extra refs.
		 *
		 * WARNING! On the ncneglist scan any race against other
		 *	    destructors (zaps or cache_inval_vp_quick() calls)
		 *	    will have already unresolved the ncp and cause
		 *	    us to drop instead of zap.  This is fine; if
		 *	    our drop winds up being the last one it will
		 *	    destroy the ncp.
		 */
		if (_cache_lock_special(ncp) == 0) {
			if (ncp->nc_vp == NULL &&
			    ncp->nc_refs == 3 &&
			    (ncp->nc_flag & NCF_UNRESOLVED) == 0)
			{
				++pcpu_ncache[mycpu->gd_cpuid].clean_neg_count;
				cache_zap(ncp);
			} else {
				_cache_unlock(ncp);
				_cache_drop(ncp);
			}
		} else {
			_cache_drop(ncp);
		}
		--count;
	}
}
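
/*
 * Worked example for the normalization above (illustrative numbers):
 * with count = 50 requested, vfscache_negs = 1000 system-wide and
 * pn->neg_count = 100 entries on this cpu's list, the cpu cleans
 * 100 * 50 / 1000 + 1 = 6 entries, i.e. roughly its proportional
 * share of the request plus one to guarantee forward progress.
 */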

/*
 * Clean out unresolved cache entries when too many have accumulated.
 * Resolved cache entries are cleaned out via the vnode reclamation
 * mechanism and by _cache_cleanneg().
 */
static void
_cache_cleanpos(long ucount, long xcount)
{
	static volatile int rover;
	struct nchash_head *nchpp;
	struct namecache *ncp;
	long count;
	int rover_copy;

	/*
	 * Don't burn too much cpu looking for stuff
	 */
	count = (ucount > xcount) ? ucount : xcount;

	/*
	 * Attempt to clean out the specified number of cache entries.
	 */
	while (count > 0 && (ucount > 0 || xcount > 0)) {
		rover_copy = atomic_fetchadd_int(&rover, 1);
		cpu_ccfence();
		nchpp = NCHHASH(rover_copy);

		if (TAILQ_FIRST(&nchpp->list) == NULL) {
			--count;
			continue;
		}

		spin_lock(&nchpp->spin);
		ncp = TAILQ_FIRST(&nchpp->list);

		/*
		 * Skip placeholder ncp's.  Do not shift their
		 * position in the list.
		 */
		while (ncp && (ncp->nc_flag & NCF_DUMMY))
			ncp = TAILQ_NEXT(ncp, nc_hash);

		if (ncp) {
			/*
			 * Move to end of list
			 */
			TAILQ_REMOVE(&nchpp->list, ncp, nc_hash);
			TAILQ_INSERT_TAIL(&nchpp->list, ncp, nc_hash);

			if (ncp->nc_refs != ncpbaserefs(ncp)) {
				/*
				 * Do not destroy internal nodes that have
				 * children or nodes which have thread
				 * references.
				 */
				ncp = NULL;
			} else if (ucount > 0 &&
				   (ncp->nc_flag & NCF_UNRESOLVED))
			{
				/*
				 * Destroy unresolved nodes if asked.
				 */
				--ucount;
				--xcount;
				_cache_hold(ncp);
			} else if (xcount > 0) {
				/*
				 * Destroy any other node if asked.
				 */
				--xcount;
				_cache_hold(ncp);
			} else {
				ncp = NULL;
			}
		}
		spin_unlock(&nchpp->spin);

		/*
		 * Try to scrap the ncp if we can do so non-blocking.
		 * We must re-check nc_refs after locking, and it will
		 * have one additional ref from above.
		 */
		if (ncp) {
			if (_cache_lock_special(ncp) == 0) {
				if (ncp->nc_refs == 1 + ncpbaserefs(ncp)) {
					++pcpu_ncache[mycpu->gd_cpuid].
						clean_pos_count;
					cache_zap(ncp);
				} else {
					_cache_unlock(ncp);
					_cache_drop(ncp);
				}
			} else {
				_cache_drop(ncp);
			}
		}
		--count;
	}
}
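
/*
 * Note on the recheck above (an added clarification): _cache_hold()
 * performed under the bucket spin lock adds one reference, so a
 * candidate that is still unused shows exactly 1 + ncpbaserefs(ncp)
 * refs once we lock it.  Any larger value means another thread
 * grabbed the entry between the bucket scan and the lock, and the
 * entry is left alone.
 */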

/*
 * This is a kitchen sink function to clean out ncps which we
 * tried to zap from cache_drop() but failed because we were
 * unable to acquire the parent lock.
 *
 * Such entries can also be removed via cache_inval_vp(), such
 * as when unmounting.
 */
static void
_cache_cleandefered(void)
{
	struct nchash_head *nchpp;
	struct namecache *ncp;
	struct namecache dummy;
	int i;

	/*
	 * Create a list iterator.  DUMMY indicates that this is a list
	 * iterator, DESTROYED prevents matches by lookup functions.
	 */
	pcpu_ncache[mycpu->gd_cpuid].numdefered = 0;
	bzero(&dummy, sizeof(dummy));
	dummy.nc_flag = NCF_DESTROYED | NCF_DUMMY;
	dummy.nc_refs = 1;

	for (i = 0; i <= nchash; ++i) {
		nchpp = &nchashtbl[i];

		spin_lock(&nchpp->spin);
		TAILQ_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
		ncp = &dummy;
		while ((ncp = TAILQ_NEXT(ncp, nc_hash)) != NULL) {
			if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
				continue;
			TAILQ_REMOVE(&nchpp->list, &dummy, nc_hash);
			TAILQ_INSERT_AFTER(&nchpp->list, ncp, &dummy,
					   nc_hash);
			_cache_hold(ncp);
			spin_unlock(&nchpp->spin);
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
				_cache_unlock(ncp);
			}
			_cache_drop(ncp);
			spin_lock(&nchpp->spin);
			ncp = &dummy;
		}
		TAILQ_REMOVE(&nchpp->list, &dummy, nc_hash);
		spin_unlock(&nchpp->spin);
	}
}
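
/*
 * Schematic of the dummy-cursor technique above (illustrative, not from
 * the original source): the dummy entry keeps our position in the hash
 * bucket so the spin lock can be dropped while an ncp is processed.
 *
 *	head -> dummy -> A -> B		scan starts, A is next
 *	head -> A -> dummy -> B		dummy re-inserted after A, lock
 *					dropped, A locked and cleaned
 *	head -> A -> dummy -> B		lock retaken, scan resumes at
 *					TAILQ_NEXT(&dummy) == B
 */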

/*
 * Name cache initialization, from vfsinit() when we are booting
 */
void
nchinit(void)
{
	struct pcpu_ncache *pn;
	globaldata_t gd;
	int i;

	/*
	 * Per-cpu accounting and negative hit list
	 */
	pcpu_ncache = kmalloc(sizeof(*pcpu_ncache) * ncpus,
			      M_VFSCACHEAUX, M_WAITOK|M_ZERO);
	for (i = 0; i < ncpus; ++i) {
		pn = &pcpu_ncache[i];
		TAILQ_INIT(&pn->neg_list);
		spin_init(&pn->neg_spin, "ncneg");
		spin_init(&pn->umount_spin, "ncumm");
	}

	/*
	 * Initialise per-cpu namecache effectiveness statistics.
	 */
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		gd->gd_nchstats = &nchstats[i];
	}

	/*
	 * Create a generous namecache hash table
	 */
	nchashtbl = hashinit_ext(vfs_inodehashsize(),
				 sizeof(struct nchash_head),
				 M_VFSCACHEAUX, &nchash);
	for (i = 0; i <= (int)nchash; ++i) {
		TAILQ_INIT(&nchashtbl[i].list);
		spin_init(&nchashtbl[i].spin, "nchinit_hash");
	}
	for (i = 0; i < NCMOUNT_NUMCACHE; ++i)
		spin_init(&ncmount_cache[i].spin, "nchinit_cache");
	nclockwarn = 5 * hz;
}
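
/*
 * Note (added for clarity, assuming the standard hashinit() contract):
 * hashinit_ext() sizes the table to a power of two and stores the size
 * minus one in nchash, so nchash acts as a mask and the valid bucket
 * indices run 0..nchash inclusive.  That is why the initialization
 * loop above uses i <= (int)nchash rather than i < nchash.
 */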

/*
 * Called from start_init() to bootstrap the root filesystem.  Returns
 * a referenced, unlocked namecache record to serve as a root or the
 * root of the system.
 *
 * Adjust our namecache counts
 */
void
cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
{
	/*struct pcpu_ncache *pn = &pcpu_ncache[mycpu->gd_cpuid];*/

	/* nc_parent is NULL, doesn't count as a leaf or unresolved */
	/*atomic_add_long(&pn->vfscache_leafs, 1);*/
	/*atomic_add_long(&pn->vfscache_unres, 1);*/

	nch->ncp = cache_alloc(0);
	nch->mount = mp;
	_cache_mntref(mp);
	if (vp)
		_cache_setvp(nch->mount, nch->ncp, vp, 1);
}

/*
 * vfs_cache_setroot()
 *
 *	Create an association between the root of our namecache and
 *	the root vnode.  This routine may be called several times during
 *	booting.
 *
 *	If the caller intends to save the returned namecache pointer somewhere
 *	it must cache_hold() it.
 */
void
vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
{
	struct vnode *ovp;
	struct nchandle onch;

	ovp = rootvnode;
	onch = rootnch;
	rootvnode = nvp;
	if (nch)
		rootnch = *nch;
	else
		cache_zero(&rootnch);
	if (ovp)
		vrele(ovp);
	if (onch.ncp)
		cache_drop(&onch);
}

/*
 * XXX OLD API COMPAT FUNCTION.  This really messes up the new namecache
 * topology and is being removed as quickly as possible.  The new VOP_N*()
 * API calls are required to make specific adjustments using the supplied
 * ncp pointers rather than just bogusly purging random vnodes.
 *
 * Invalidate all namecache entries to a particular vnode as well as
 * any direct children of that vnode in the namecache.  This is a
 * 'catch all' purge used by filesystems that do not know any better.
 *
 * Note that the linkage between the vnode and its namecache entries will
 * be removed, but the namecache entries themselves might stay put due to
 * active references from elsewhere in the system or due to the existence of
 * the children.  The namecache topology is left intact even if we do not
 * know what the vnode association is.  Such entries will be marked
 * NCF_UNRESOLVED.
 */
void
cache_purge(struct vnode *vp)
{
	cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
}
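
/*
 * Usage sketch (illustrative, assuming a filesystem reclaim path; the
 * fs-specific teardown below is hypothetical): a filesystem that does
 * not maintain fine-grained namecache state simply purges everything
 * associated with a vnode before recycling it.
 *
 *	cache_purge(vp);	// drop vp<->ncp linkage and children
 *	vp->v_data = NULL;	// hypothetical fs-specific teardown
 */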

__read_mostly static int disablecwd;
SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0,
    "Disable getcwd");

int
sys___getcwd(struct sysmsg *sysmsg, const struct __getcwd_args *uap)
{
	u_int buflen;
	int error;
	char *buf;
	char *bp;

	if (disablecwd)
		return (ENODEV);

	buflen = uap->buflen;
	if (buflen == 0)
		return (EINVAL);
	if (buflen > MAXPATHLEN)
		buflen = MAXPATHLEN;

	buf = kmalloc(buflen, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, buflen, &error);
	if (error == 0)
		error = copyout(bp, uap->buf, strlen(bp) + 1);
	kfree(buf, M_TEMP);
	return (error);
}

char *
kern_getcwd(char *buf, size_t buflen, int *error)
{
	struct proc *p = curproc;
	char *bp;
	int i, slash_prefixed;
	struct filedesc *fdp;
	struct nchandle nch;
	struct namecache *ncp;

	slash_prefixed = 0;
	bp = buf + buflen - 1;
	*bp = '\0';

	fdp = p->p_fd;
	nch = fdp->fd_ncdir;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);

	while (ncp && (ncp != fdp->fd_nrdir.ncp ||
	       nch.mount != fdp->fd_nrdir.mount)
	) {
		if (ncp->nc_flag & NCF_DESTROYED) {
			_cache_drop(ncp);
			ncp = NULL;
			break;
		}

		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point
		 * in the underlying filesystem.
		 */
		if (ncp == nch.mount->mnt_ncmountpt.ncp) {
			nch = nch.mount->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				*error = ERANGE;
				bp = NULL;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			if (ncp_shared_lock_disable)
				_cache_lock(ncp);
			else
				_cache_lock_shared(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}

	if (ncp == NULL) {
		*error = ENOENT;
		bp = NULL;
		goto done;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			*error = ERANGE;
			bp = NULL;
			goto done;
		}
		*--bp = '/';
	}
	*error = 0;
done:
	if (ncp)
		_cache_drop(ncp);
	return (bp);
}
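
/*
 * Illustrative caller sketch (hypothetical helper, not part of the
 * original source): kern_getcwd() builds the path backwards from the
 * end of the supplied buffer and returns a pointer into it, so the
 * caller frees the buffer, never the returned pointer.
 */
static void
example_getcwd_usage(void)
{
	char *buf;
	char *bp;
	int error;

	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = kern_getcwd(buf, MAXPATHLEN, &error);
	if (error == 0)
		kprintf("cwd: %s\n", bp);	/* bp points into buf */
	kfree(buf, M_TEMP);			/* free buf, not bp */
}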

/*
 * Thus begins the fullpath magic.
 *
 * The passed nchp is referenced but not locked.
 */
__read_mostly static int disablefullpath;
SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
	   &disablefullpath, 0,
	   "Disable fullpath lookups");

int
cache_fullpath(struct proc *p, struct nchandle *nchp, struct nchandle *nchbase,
	       char **retbuf, char **freebuf, int guess)
{
	struct nchandle fd_nrdir;
	struct nchandle nch;
	struct namecache *ncp;
	struct mount *mp, *new_mp;
	char *bp;
	char *buf;
	int slash_prefixed;
	int error = 0;
	int i;

	*retbuf = NULL;
	*freebuf = NULL;

	buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	bp = buf + MAXPATHLEN - 1;
	*bp = '\0';
	if (nchbase)
		fd_nrdir = *nchbase;
	else if (p != NULL)
		fd_nrdir = p->p_fd->fd_nrdir;
	else
		fd_nrdir = rootnch;
	slash_prefixed = 0;
	nch = *nchp;
	ncp = nch.ncp;
	if (ncp)
		_cache_hold(ncp);
	mp = nch.mount;

	while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
		new_mp = NULL;

		/*
		 * If we are asked to guess the upwards path, we do so whenever
		 * we encounter an ncp marked as a mountpoint. We try to find
		 * the actual mountpoint by finding the mountpoint with this
		 * ncp.
		 */
		if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) {
			new_mp = mount_get_by_nc(ncp);
		}
		/*
		 * While traversing upwards if we encounter the root
		 * of the current mount we have to skip to the mount point.
		 */
		if (ncp == mp->mnt_ncmountpt.ncp) {
			new_mp = mp;
		}
		if (new_mp) {
			nch = new_mp->mnt_ncmounton;
			_cache_drop(ncp);
			ncp = nch.ncp;
			if (ncp)
				_cache_hold(ncp);
			mp = nch.mount;
			continue;
		}

		/*
		 * Prepend the path segment
		 */
		for (i = ncp->nc_nlen - 1; i >= 0; i--) {
			if (bp == buf) {
				kfree(buf, M_TEMP);
				error = ENOMEM;
				goto done;
			}
			*--bp = ncp->nc_name[i];
		}
		if (bp == buf) {
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
		slash_prefixed = 1;

		/*
		 * Go up a directory.  This isn't a mount point so we don't
		 * have to check again.
		 *
		 * We can only safely access nc_parent with ncp held locked.
		 */
		while ((nch.ncp = ncp->nc_parent) != NULL) {
			_cache_lock_shared(ncp);
			if (nch.ncp != ncp->nc_parent) {
				_cache_unlock(ncp);
				continue;
			}
			_cache_hold(nch.ncp);
			_cache_unlock(ncp);
			break;
		}
		_cache_drop(ncp);
		ncp = nch.ncp;
	}

	if (ncp == NULL) {
		kfree(buf, M_TEMP);
		error = ENOENT;
		goto done;
	}
	if (!slash_prefixed) {
		if (bp == buf) {
			kfree(buf, M_TEMP);
			error = ENOMEM;
			goto done;
		}
		*--bp = '/';
	}
	*retbuf = bp;
	*freebuf = buf;
done:
	if (ncp)
		_cache_drop(ncp);
	return (error);
}
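
/*
 * Illustrative caller sketch (hypothetical helper, not part of the
 * original source): on success cache_fullpath() hands back two
 * pointers; retbuf is the path and freebuf is the allocation that
 * must be kfree()'d when the caller is done with the path.
 */
static int
example_fullpath_usage(struct proc *p, struct nchandle *nchp)
{
	char *retbuf;
	char *freebuf;
	int error;

	error = cache_fullpath(p, nchp, NULL, &retbuf, &freebuf, 0);
	if (error == 0) {
		kprintf("path: %s\n", retbuf);	/* retbuf points into freebuf */
		kfree(freebuf, M_TEMP);
	}
	return (error);
}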

int
vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf,
	    char **freebuf, int guess)
{
	struct namecache *ncp;
	struct nchandle nch;
	int error;

	*freebuf = NULL;
	if (disablefullpath)
		return (ENODEV);
	if (p == NULL)
		return (EINVAL);

	/* vn is NULL, client wants us to use p->p_textvp */
	if (vn == NULL) {
		if ((vn = p->p_textvp) == NULL)
			return (EINVAL);
	}
	spin_lock_shared(&vn->v_spin);
	TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
		if (ncp->nc_nlen)
			break;
	}
	if (ncp == NULL) {
		spin_unlock_shared(&vn->v_spin);
		return (EINVAL);
	}
	_cache_hold(ncp);
	spin_unlock_shared(&vn->v_spin);

	nch.ncp = ncp;
	nch.mount = vn->v_mount;
	error = cache_fullpath(p, &nch, NULL, retbuf, freebuf, guess);
	_cache_drop(ncp);
	return (error);
}

/*
 * Roll this cpu's per-cpu namecache statistics up into the global
 * totals.
 */
void
vfscache_rollup_cpu(struct globaldata *gd)
{
	struct pcpu_ncache *pn;
	long count;

	if (pcpu_ncache == NULL)
		return;
	pn = &pcpu_ncache[gd->gd_cpuid];

	/*
	 * namecache statistics
	 */
	if (pn->vfscache_count) {
		count = atomic_swap_long(&pn->vfscache_count, 0);
		atomic_add_long(&vfscache_count, count);
	}
	if (pn->vfscache_leafs) {
		count = atomic_swap_long(&pn->vfscache_leafs, 0);
		atomic_add_long(&vfscache_leafs, count);
	}
	if (pn->vfscache_unres) {
		count = atomic_swap_long(&pn->vfscache_unres, 0);
		atomic_add_long(&vfscache_unres, count);
	}
	if (pn->vfscache_negs) {
		count = atomic_swap_long(&pn->vfscache_negs, 0);
		atomic_add_long(&vfscache_negs, count);
	}

	/*
	 * hysteresis based cleanings
	 */
	if (pn->inv_kid_quick_count) {
		count = atomic_swap_long(&pn->inv_kid_quick_count, 0);
		atomic_add_long(&inv_kid_quick_count, count);
	}
	if (pn->inv_ncp_quick_count) {
		count = atomic_swap_long(&pn->inv_ncp_quick_count, 0);
		atomic_add_long(&inv_ncp_quick_count, count);
	}
	if (pn->clean_pos_count) {
		count = atomic_swap_long(&pn->clean_pos_count, 0);
		atomic_add_long(&clean_pos_count, count);
	}
	if (pn->clean_neg_count) {
		count = atomic_swap_long(&pn->clean_neg_count, 0);
		atomic_add_long(&clean_neg_count, count);
	}
	if (pn->numdefered) {
		count = atomic_swap_long(&pn->numdefered, 0);
		atomic_add_long(&numdefered, count);
	}
}
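
/*
 * Note on the pattern above (added for clarity): each counter is moved
 * with atomic_swap_long(), which reads the per-cpu value and zeroes it
 * in a single atomic step, so increments racing with the rollup land
 * in the freshly zeroed per-cpu slot instead of being lost:
 *
 *	count = atomic_swap_long(&pn->vfscache_count, 0);
 *	atomic_add_long(&vfscache_count, count);
 */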