kernel - Cleanup, add assertions in the vnode freeing path
[dragonfly.git] / sys / kern / vfs_cache.c
blob 03d160d5b6a0083878a3075f5b621d16d1f667d6
/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright (c) 1989, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Poul-Henning Kamp of the FreeBSD Project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/sysproto.h>
#include <sys/spinlock.h>
#include <sys/proc.h>
#include <sys/namei.h>
#include <sys/nlookup.h>
#include <sys/filedesc.h>
#include <sys/fnv_hash.h>
#include <sys/globaldata.h>
#include <sys/kern_syscall.h>
#include <sys/dirent.h>
#include <ddb/ddb.h>

#include <sys/sysref2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#define MAX_RECURSION_DEPTH	64
/*
 * Random lookups in the cache are accomplished with a hash table using
 * a hash key of (nc_src_vp, name).  Each hash chain has its own spin lock.
 *
 * Negative entries may exist and correspond to resolved namecache
 * structures where nc_vp is NULL.  In a negative entry, NCF_WHITEOUT
 * will be set if the entry corresponds to a whited-out directory entry
 * (versus simply not finding the entry at all).  ncneglist is locked
 * with a global spinlock (ncspin).
 *
 * MPSAFE RULES:
 *
 * (1) A ncp must be referenced before it can be locked.
 *
 * (2) A ncp must be locked in order to modify it.
 *
 * (3) ncp locks are always ordered child -> parent.  That may seem
 *     backwards but forward scans use the hash table and thus can hold
 *     the parent unlocked when traversing downward.
 *
 *     This allows insert/rename/delete/dot-dot and other operations
 *     to use ncp->nc_parent links.
 *
 *     This also prevents a locked up e.g. NFS node from creating a
 *     chain reaction all the way back to the root vnode / namecache.
 *
 * (4) parent linkages require both the parent and child to be locked.
 */
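
/*
 * Editor's note (illustrative sketch, not in the original source): rule
 * (3) means a delete-style operation locks upward, child first.  Compare
 * _cache_unlink_parent() below, which follows exactly this pattern:
 *
 *	_cache_hold(ncp);		ref before lock, rule (1)
 *	_cache_lock(ncp);		child locked first, rule (3)
 *	par = ncp->nc_parent;		stable while the child is locked
 *	_cache_hold(par);
 *	_cache_lock(par);		then the parent, rule (4)
 *	... modify the linkage ...
 *	_cache_put(par);
 *	_cache_put(ncp);
 */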
/*
 * Structures associated with name caching.
 */
#define NCHHASH(hash)		(&nchashtbl[(hash) & nchash])
#define MINNEG			1024

MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");

LIST_HEAD(nchash_list, namecache);

struct nchash_head {
	struct nchash_list	list;
	struct spinlock		spin;
};

static struct nchash_head	*nchashtbl;
static struct namecache_list	ncneglist;
static struct spinlock		ncspin;
/*
 * ncvp_debug - debug cache_fromvp().  This is used by the NFS server
 * to create the namecache infrastructure leading to a dangling vnode.
 *
 * 0	Only errors are reported
 * 1	Successes are reported
 * 2	Successes + the whole directory scan is reported
 * 3	Force the directory scan code run as if the parent vnode did not
 *	have a namecache record, even if it does have one.
 */
static int	ncvp_debug;
SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");

static u_long	nchash;			/* size of hash table */
SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");

static int	ncnegfactor = 16;	/* ratio of negative entries */
SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");

static int	nclockwarn;		/* warn on locked entries in ticks */
SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");

static int	numneg;			/* number of negative cache entries */
SYSCTL_INT(_debug, OID_AUTO, numneg, CTLFLAG_RD, &numneg, 0, "");

static int	numdefered;		/* number of deferred-zap entries */
SYSCTL_INT(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0, "");

static int	numcache;		/* number of cache entries allocated */
SYSCTL_INT(_debug, OID_AUTO, numcache, CTLFLAG_RD, &numcache, 0, "");

SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");

int cache_mpsafe;
SYSCTL_INT(_vfs, OID_AUTO, cache_mpsafe, CTLFLAG_RW, &cache_mpsafe, 0, "");
static int cache_resolve_mp(struct mount *mp);
static struct vnode *cache_dvpref(struct namecache *ncp);
static void _cache_lock(struct namecache *ncp);
static void _cache_setunresolved(struct namecache *ncp);
static void _cache_cleanneg(int count);
static void _cache_cleandefered(void);

/*
 * The new name cache statistics
 */
SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
#define STATNODE(mode, name, var) \
	SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
STATNODE(CTLFLAG_RD, numneg, &numneg);
STATNODE(CTLFLAG_RD, numcache, &numcache);
static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);

struct nchstats nchstats[SMP_MAXCPU];
/*
 * Export VFS cache effectiveness statistics to user-land.
 *
 * The statistics are left for aggregation to user-land so
 * neat things can be achieved, like observing per-CPU cache
 * distribution.
 */
static int
sysctl_nchstats(SYSCTL_HANDLER_ARGS)
{
	struct globaldata *gd;
	int i, error;

	error = 0;
	for (i = 0; i < ncpus; ++i) {
		gd = globaldata_find(i);
		if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
			sizeof(struct nchstats))))
			break;
	}

	return (error);
}
SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
  0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
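
/*
 * Editor's note (illustrative sketch, not in the original source): a
 * user-land consumer reads the handler above as one struct nchstats
 * record per cpu and aggregates the fields itself, e.g.:
 *
 *	struct nchstats stats[64];	sized generously
 *	size_t len = sizeof(stats);
 *	if (sysctlbyname("vfs.cache.nchstats", stats, &len, NULL, 0) == 0)
 *		n = len / sizeof(struct nchstats);
 *	... sum per-cpu fields such as ncs_goodhits over the n records ...
 *
 * The field name ncs_goodhits is cited from memory and should be checked
 * against the actual struct nchstats definition.
 */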
static struct namecache *cache_zap(struct namecache *ncp, int nonblock);

/*
 * Namespace locking.  The caller must already hold a reference to the
 * namecache structure in order to lock/unlock it.  This function prevents
 * the namespace from being created or destroyed by accessors other than
 * the lock holder.
 *
 * Note that holding a locked namecache structure prevents other threads
 * from making namespace changes (e.g. deleting or creating), prevents
 * vnode association state changes by other threads, and prevents the
 * namecache entry from being resolved or unresolved by other threads.
 *
 * The lock owner has full authority to associate/disassociate vnodes
 * and resolve/unresolve the locked ncp.
 *
 * The primary lock field is nc_exlocks.  nc_locktd is set after the
 * fact (when locking) or cleared prior to unlocking.
 *
 * WARNING!  Holding a locked ncp will prevent a vnode from being destroyed
 *	     or recycled, but it does NOT help you if the vnode had already
 *	     initiated a recyclement.  If this is important, use cache_get()
 *	     rather than cache_lock() (and deal with the differences in the
 *	     way the refs counter is handled).  Or, alternatively, make an
 *	     unconditional call to cache_validate() or cache_resolve()
 *	     after cache_lock() returns.
 *
 * MPSAFE
 */
static
void
_cache_lock(struct namecache *ncp)
{
	thread_t td;
	int didwarn;
	int error;
	u_int count;

	KKASSERT(ncp->nc_refs != 0);
	didwarn = 0;
	td = curthread;

	for (;;) {
		count = ncp->nc_exlocks;

		if (count == 0) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 *	 lock on the ncp (which we do).
				 */
				ncp->nc_locktd = td;
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);	/* MPSAFE */
				break;
			}
			/* cmpset failed */
			continue;
		}
		if (ncp->nc_locktd == td) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
					      count + 1)) {
				break;
			}
			/* cmpset failed */
			continue;
		}
		tsleep_interlock(ncp, 0);
		if (atomic_cmpset_int(&ncp->nc_exlocks, count,
				      count | NC_EXLOCK_REQ) == 0) {
			/* cmpset failed */
			continue;
		}
		error = tsleep(ncp, PINTERLOCKED, "clock", nclockwarn);
		if (error == EWOULDBLOCK) {
			if (didwarn == 0) {
				didwarn = ticks;
				kprintf("[diagnostic] cache_lock: blocked "
					"on %p",
					ncp);
				kprintf(" \"%*.*s\"\n",
					ncp->nc_nlen, ncp->nc_nlen,
					ncp->nc_name);
			}
		}
	}
	if (didwarn) {
		kprintf("[diagnostic] cache_lock: unblocked %*.*s after "
			"%d secs\n",
			ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
			(int)(ticks - didwarn) / hz);
	}
}
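
/*
 * Editor's note (illustrative, not in the original source): nc_exlocks
 * packs a recursion count and a contention flag into one word, which is
 * what the cmpset loops above manipulate:
 *
 *	nc_exlocks & ~NC_EXLOCK_REQ	exclusive lock recursion count
 *	nc_exlocks & NC_EXLOCK_REQ	a waiter is sleeping and must be
 *					woken on the final release
 *
 * A blocked locker publishes NC_EXLOCK_REQ with cmpset only after arming
 * the tsleep interlock, so the flag cannot be lost between the test and
 * the sleep.
 */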
/*
 * NOTE: nc_refs may be zero if the ncp is interlocked by circumstance,
 *	 such as the case where one of its children is locked.
 *
 * MPSAFE
 */
static
int
_cache_lock_nonblock(struct namecache *ncp)
{
	thread_t td;
	u_int count;

	td = curthread;

	for (;;) {
		count = ncp->nc_exlocks;

		if (count == 0) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
				/*
				 * The vp associated with a locked ncp must
				 * be held to prevent it from being recycled.
				 *
				 * WARNING!  If VRECLAIMED is set the vnode
				 * could already be in the middle of a recycle.
				 * Callers must use cache_vref() or
				 * cache_vget() on the locked ncp to
				 * validate the vp or set the cache entry
				 * to unresolved.
				 *
				 * NOTE! vhold() is allowed if we hold a
				 *	 lock on the ncp (which we do).
				 */
				ncp->nc_locktd = td;
				if (ncp->nc_vp)
					vhold(ncp->nc_vp);	/* MPSAFE */
				break;
			}
			/* cmpset failed */
			continue;
		}
		if (ncp->nc_locktd == td) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
					      count + 1)) {
				break;
			}
			/* cmpset failed */
			continue;
		}
		return(EWOULDBLOCK);
	}
	return(0);
}
/*
 * Helper function
 *
 * NOTE: nc_refs can be 0 (degenerate case during _cache_drop).
 *
 *	 nc_locktd must be NULLed out prior to nc_exlocks getting cleared.
 *
 * MPSAFE
 */
static
void
_cache_unlock(struct namecache *ncp)
{
	thread_t td __debugvar = curthread;
	u_int count;

	KKASSERT(ncp->nc_refs >= 0);
	KKASSERT(ncp->nc_exlocks > 0);
	KKASSERT(ncp->nc_locktd == td);

	count = ncp->nc_exlocks;
	if ((count & ~NC_EXLOCK_REQ) == 1) {
		ncp->nc_locktd = NULL;
		if (ncp->nc_vp)
			vdrop(ncp->nc_vp);
	}
	for (;;) {
		if ((count & ~NC_EXLOCK_REQ) == 1) {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count, 0)) {
				if (count & NC_EXLOCK_REQ)
					wakeup(ncp);
				break;
			}
		} else {
			if (atomic_cmpset_int(&ncp->nc_exlocks, count,
					      count - 1)) {
				break;
			}
		}
		count = ncp->nc_exlocks;
	}
}
/*
 * cache_hold() and cache_drop() prevent the premature deletion of a
 * namecache entry but do not prevent operations (such as zapping) on
 * that namecache entry.
 *
 * This routine may only be called from outside this source module if
 * nc_refs is already at least 1.
 *
 * This is a rare case where callers are allowed to hold a spinlock,
 * so we can't use one ourselves.
 *
 * MPSAFE
 */
static __inline
struct namecache *
_cache_hold(struct namecache *ncp)
{
	atomic_add_int(&ncp->nc_refs, 1);
	return(ncp);
}

/*
 * Drop a cache entry, taking care to deal with races.
 *
 * For potential 1->0 transitions we must hold the ncp lock to safely
 * test its flags.  An unresolved entry with no children must be zapped
 * to avoid leaks.
 *
 * The call to cache_zap() itself will handle all remaining races and
 * will decrement the ncp's refs regardless.  If we are resolved or
 * have children nc_refs can safely be dropped to 0 without having to
 * zap the entry.
 *
 * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion.
 *
 * NOTE: cache_zap() may return a non-NULL referenced parent which must
 *	 be dropped in a loop.
 *
 * MPSAFE
 */
static __inline
void
_cache_drop(struct namecache *ncp)
{
	int refs;

	while (ncp) {
		KKASSERT(ncp->nc_refs > 0);
		refs = ncp->nc_refs;

		if (refs == 1) {
			if (_cache_lock_nonblock(ncp) == 0) {
				ncp->nc_flag &= ~NCF_DEFEREDZAP;
				if ((ncp->nc_flag & NCF_UNRESOLVED) &&
				    TAILQ_EMPTY(&ncp->nc_list)) {
					ncp = cache_zap(ncp, 1);
					continue;
				}
				if (atomic_cmpset_int(&ncp->nc_refs, 1, 0)) {
					_cache_unlock(ncp);
					break;
				}
				_cache_unlock(ncp);
			}
		} else {
			if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1))
				break;
		}
		cpu_pause();
	}
}
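
/*
 * Editor's note (illustrative, not in the original source): the drop
 * logic above reduces to a small decision tree per iteration:
 *
 *	refs > 1:   cmpset refs -> refs-1 and return
 *	refs == 1:  try-lock; if unresolved and childless, cache_zap()
 *		    (which may hand back a referenced parent to loop on),
 *		    otherwise cmpset 1 -> 0 under the lock and return
 *
 * The loop-on-parent structure is what keeps deep topologies from
 * recursing and blowing out the kernel stack.
 */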
/*
 * Link a new namecache entry to its parent and to the hash table.  Be
 * careful to avoid races if vhold() blocks in the future.
 *
 * Both ncp and par must be referenced and locked.
 *
 * NOTE: The hash table spinlock is likely held during this call, we
 *	 can't do anything fancy.
 *
 * MPSAFE
 */
static void
_cache_link_parent(struct namecache *ncp, struct namecache *par,
		   struct nchash_head *nchpp)
{
	KKASSERT(ncp->nc_parent == NULL);
	ncp->nc_parent = par;
	ncp->nc_head = nchpp;
	LIST_INSERT_HEAD(&nchpp->list, ncp, nc_hash);

	if (TAILQ_EMPTY(&par->nc_list)) {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
		/*
		 * Any vp associated with an ncp which has children must
		 * be held to prevent it from being recycled.
		 */
		if (par->nc_vp)
			vhold(par->nc_vp);
	} else {
		TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
	}
}

/*
 * Remove the parent and hash associations from a namecache structure.
 * If this is the last child of the parent the cache_drop(par) will
 * attempt to recursively zap the parent.
 *
 * ncp must be locked.  This routine will acquire a temporary lock on
 * the parent as well as the appropriate hash chain.
 *
 * MPSAFE
 */
static void
_cache_unlink_parent(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dropvp;

	if ((par = ncp->nc_parent) != NULL) {
		KKASSERT(ncp->nc_parent == par);
		_cache_hold(par);
		_cache_lock(par);
		spin_lock_wr(&ncp->nc_head->spin);
		LIST_REMOVE(ncp, nc_hash);
		TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
		dropvp = NULL;
		if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
			dropvp = par->nc_vp;
		spin_unlock_wr(&ncp->nc_head->spin);
		ncp->nc_parent = NULL;
		ncp->nc_head = NULL;
		_cache_unlock(par);
		_cache_drop(par);

		/*
		 * We can only safely vdrop with no spinlocks held.
		 */
		if (dropvp)
			vdrop(dropvp);
	}
}

/*
 * Allocate a new namecache structure.  Most of the code does not require
 * zero-termination of the string but it makes vop_compat_ncreate() easier.
 *
 * MPSAFE
 */
static struct namecache *
cache_alloc(int nlen)
{
	struct namecache *ncp;

	ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
	if (nlen)
		ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
	ncp->nc_nlen = nlen;
	ncp->nc_flag = NCF_UNRESOLVED;
	ncp->nc_error = ENOTCONN;	/* needs to be resolved */
	ncp->nc_refs = 1;

	TAILQ_INIT(&ncp->nc_list);
	_cache_lock(ncp);
	return(ncp);
}

/*
 * Can only be called for the case where the ncp has never been
 * associated with anything (so no spinlocks are needed).
 *
 * MPSAFE
 */
static void
_cache_free(struct namecache *ncp)
{
	KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
	if (ncp->nc_name)
		kfree(ncp->nc_name, M_VFSCACHE);
	kfree(ncp, M_VFSCACHE);
}
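
/*
 * Editor's note (illustrative, not in the original source): cache_alloc()
 * hands back an entry that is already referenced (nc_refs == 1), locked
 * (nc_exlocks == 1) and unresolved, so the two common exits are:
 *
 *	ncp = cache_alloc(nlen);
 *	... linked into the topology ...	release with _cache_put(ncp)
 *	... never linked (e.g. lost a race) ...	destroy with _cache_free(ncp)
 *
 * _cache_free()'s assertion encodes exactly that second case: one ref,
 * one lock, never associated with anything.
 */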
/*
 * MPSAFE
 */
void
cache_zero(struct nchandle *nch)
{
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * Ref and deref a namecache structure.
 *
 * The caller must specify a stable ncp pointer, typically meaning the
 * ncp is already referenced but this can also occur indirectly through
 * e.g. holding a lock on a direct child.
 *
 * WARNING: Caller may hold an unrelated read spinlock, which means we can't
 *	    use read spinlocks here.
 *
 * MPSAFE if nch is
 */
struct nchandle *
cache_hold(struct nchandle *nch)
{
	_cache_hold(nch->ncp);
	atomic_add_int(&nch->mount->mnt_refs, 1);
	return(nch);
}

/*
 * Create a copy of a namecache handle for an already-referenced
 * entry.
 *
 * MPSAFE if nch is
 */
void
cache_copy(struct nchandle *nch, struct nchandle *target)
{
	*target = *nch;
	if (target->ncp)
		_cache_hold(target->ncp);
	atomic_add_int(&nch->mount->mnt_refs, 1);
}

/*
 * MPSAFE if nch is
 */
void
cache_changemount(struct nchandle *nch, struct mount *mp)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	nch->mount = mp;
	atomic_add_int(&nch->mount->mnt_refs, 1);
}

/*
 * MPSAFE
 */
void
cache_drop(struct nchandle *nch)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	_cache_drop(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}

/*
 * MPSAFE
 */
void
cache_lock(struct nchandle *nch)
{
	_cache_lock(nch->ncp);
}

/*
 * Relock nch1 given an unlocked nch1 and a locked nch2.  The caller
 * is responsible for checking both for validity on return as they
 * may have become invalid.
 *
 * We have to deal with potential deadlocks here, just ping pong
 * the lock until we get it (we will always block somewhere when
 * looping so this is not cpu-intensive).
 *
 * which = 0	nch1 not locked, nch2 is locked
 * which = 1	nch1 is locked, nch2 is not locked
 */
void
cache_relock(struct nchandle *nch1, struct ucred *cred1,
	     struct nchandle *nch2, struct ucred *cred2)
{
	int which;

	which = 0;

	for (;;) {
		if (which == 0) {
			if (cache_lock_nonblock(nch1) == 0) {
				cache_resolve(nch1, cred1);
				break;
			}
			cache_unlock(nch2);
			cache_lock(nch1);
			cache_resolve(nch1, cred1);
			which = 1;
		} else {
			if (cache_lock_nonblock(nch2) == 0) {
				cache_resolve(nch2, cred2);
				break;
			}
			cache_unlock(nch1);
			cache_lock(nch2);
			cache_resolve(nch2, cred2);
			which = 0;
		}
	}
}
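
/*
 * Editor's note (illustrative sketch, not in the original source): the
 * ping-pong above avoids lock-order deadlocks by never blocking on one
 * handle while the other one is held.  A caller holding tnch locked and
 * needing fnch would invoke it as:
 *
 *	cache_relock(&fnch, cred, &tnch, cred);
 *
 * i.e. the first handle is the unlocked one.  Both handles must be
 * re-validated afterwards since either may have been unlocked and
 * relocked during the loop.
 */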
/*
 * MPSAFE
 */
int
cache_lock_nonblock(struct nchandle *nch)
{
	return(_cache_lock_nonblock(nch->ncp));
}

/*
 * MPSAFE
 */
void
cache_unlock(struct nchandle *nch)
{
	_cache_unlock(nch->ncp);
}

/*
 * ref-and-lock, unlock-and-deref functions.
 *
 * This function is primarily used by nlookup.  Even though cache_lock
 * holds the vnode, it is possible that the vnode may have already
 * initiated a recyclement.
 *
 * We want cache_get() to return a definitively usable vnode or a
 * definitively unresolved ncp.
 *
 * MPSAFE
 */
static
struct namecache *
_cache_get(struct namecache *ncp)
{
	_cache_hold(ncp);
	_cache_lock(ncp);
	if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
		_cache_setunresolved(ncp);
	return(ncp);
}

/*
 * This is a special form of _cache_lock() which only succeeds if
 * it can get a pristine, non-recursive lock.  The caller must have
 * already ref'd the ncp.
 *
 * On success the ncp will be locked, on failure it will not.  The
 * ref count does not change either way.
 *
 * We want _cache_lock_special() (on success) to return a definitively
 * usable vnode or a definitively unresolved ncp.
 *
 * MPSAFE
 */
static int
_cache_lock_special(struct namecache *ncp)
{
	if (_cache_lock_nonblock(ncp) == 0) {
		if ((ncp->nc_exlocks & ~NC_EXLOCK_REQ) == 1) {
			if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
				_cache_setunresolved(ncp);
			return(0);
		}
		_cache_unlock(ncp);
	}
	return(EWOULDBLOCK);
}

/*
 * NOTE: The same nchandle can be passed for both arguments.
 *
 * MPSAFE
 */
void
cache_get(struct nchandle *nch, struct nchandle *target)
{
	KKASSERT(nch->ncp->nc_refs > 0);
	target->mount = nch->mount;
	target->ncp = _cache_get(nch->ncp);
	atomic_add_int(&target->mount->mnt_refs, 1);
}

/*
 * MPSAFE
 */
static __inline
void
_cache_put(struct namecache *ncp)
{
	_cache_unlock(ncp);
	_cache_drop(ncp);
}

/*
 * MPSAFE
 */
void
cache_put(struct nchandle *nch)
{
	atomic_add_int(&nch->mount->mnt_refs, -1);
	_cache_put(nch->ncp);
	nch->ncp = NULL;
	nch->mount = NULL;
}
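
/*
 * Editor's note (illustrative sketch, not in the original source):
 * cache_get() and cache_put() bracket the canonical access pattern for
 * a handle:
 *
 *	struct nchandle tmp;
 *
 *	cache_get(&nch, &tmp);		ref+lock; revalidates VRECLAIMED
 *	if (tmp.ncp->nc_flag & NCF_UNRESOLVED)
 *		cache_resolve(&tmp, cred);
 *	... use tmp.ncp / tmp.ncp->nc_vp ...
 *	cache_put(&tmp);		unlock+deref, NULLs the handle
 *
 * Unlike a bare cache_lock(), this pairing guarantees the caller sees
 * either a usable vnode or an unresolved entry, never a half-recycled
 * vnode.
 */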
/*
 * Resolve an unresolved ncp by associating a vnode with it.  If the
 * vnode is NULL, a negative cache entry is created.
 *
 * The ncp should be locked on entry and will remain locked on return.
 *
 * MPSAFE
 */
static
void
_cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
{
	KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);

	if (vp != NULL) {
		/*
		 * Any vp associated with an ncp which has children must
		 * be held.  Any vp associated with a locked ncp must be held.
		 */
		if (!TAILQ_EMPTY(&ncp->nc_list))
			vhold(vp);
		spin_lock_wr(&vp->v_spinlock);
		ncp->nc_vp = vp;
		TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
		spin_unlock_wr(&vp->v_spinlock);
		if (ncp->nc_exlocks)
			vhold(vp);

		/*
		 * Set auxiliary flags
		 */
		switch(vp->v_type) {
		case VDIR:
			ncp->nc_flag |= NCF_ISDIR;
			break;
		case VLNK:
			ncp->nc_flag |= NCF_ISSYMLINK;
			/* XXX cache the contents of the symlink */
			break;
		default:
			break;
		}
		atomic_add_int(&numcache, 1);
		ncp->nc_error = 0;
	} else {
		/*
		 * When creating a negative cache hit we set the
		 * namecache_gen.  A later resolve will clean out the
		 * negative cache hit if the mount point's namecache_gen
		 * has changed.  Used by devfs, could also be used by
		 * other remote FSs.
		 */
		ncp->nc_vp = NULL;
		spin_lock_wr(&ncspin);
		TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
		++numneg;
		spin_unlock_wr(&ncspin);
		ncp->nc_error = ENOENT;
		if (mp)
			ncp->nc_namecache_gen = mp->mnt_namecache_gen;
	}
	ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);
}
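
/*
 * Editor's note (illustrative sketch, not in the original source): a
 * filesystem resolver typically finishes with one of two calls, which
 * map onto the two branches above:
 *
 *	cache_setvp(nch, vp);		positive hit: nc_error = 0,
 *					NCF_ISDIR/NCF_ISSYMLINK derived
 *					from vp->v_type
 *	cache_setvp(nch, NULL);		negative hit: nc_error = ENOENT,
 *					entry queued on ncneglist and
 *					stamped with mnt_namecache_gen
 *
 * Either way NCF_UNRESOLVED is cleared, so subsequent lookups are served
 * from the cache until the entry times out or is invalidated.
 */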
/*
 * MPSAFE
 */
void
cache_setvp(struct nchandle *nch, struct vnode *vp)
{
	_cache_setvp(nch->mount, nch->ncp, vp);
}

/*
 * MPSAFE
 */
void
cache_settimeout(struct nchandle *nch, int nticks)
{
	struct namecache *ncp = nch->ncp;

	if ((ncp->nc_timeout = ticks + nticks) == 0)
		ncp->nc_timeout = 1;
}

/*
 * Disassociate the vnode or negative-cache association and mark a
 * namecache entry as unresolved again.  Note that the ncp is still
 * left in the hash table and still linked to its parent.
 *
 * The ncp should be locked and refd on entry and will remain locked and refd
 * on return.
 *
 * This routine is normally never called on a directory containing children.
 * However, NFS often does just that in its rename() code as a cop-out to
 * avoid complex namespace operations.  This disconnects a directory vnode
 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
 * sync.
 *
 * MPSAFE
 */
static
void
_cache_setunresolved(struct namecache *ncp)
{
	struct vnode *vp;

	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		ncp->nc_flag |= NCF_UNRESOLVED;
		ncp->nc_timeout = 0;
		ncp->nc_error = ENOTCONN;
		if ((vp = ncp->nc_vp) != NULL) {
			atomic_add_int(&numcache, -1);
			spin_lock_wr(&vp->v_spinlock);
			ncp->nc_vp = NULL;
			TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
			spin_unlock_wr(&vp->v_spinlock);

			/*
			 * Any vp associated with an ncp with children is
			 * held by that ncp.  Any vp associated with a locked
			 * ncp is held by that ncp.  These conditions must be
			 * undone when the vp is cleared out from the ncp.
			 */
			if (!TAILQ_EMPTY(&ncp->nc_list))
				vdrop(vp);
			if (ncp->nc_exlocks)
				vdrop(vp);
		} else {
			spin_lock_wr(&ncspin);
			TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
			--numneg;
			spin_unlock_wr(&ncspin);
		}
		ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
	}
}

/*
 * The cache_nresolve() code calls this function to automatically
 * set a resolved cache element to unresolved if it has timed out
 * or if it is a negative cache hit and the mount point namecache_gen
 * has changed.
 *
 * MPSAFE
 */
static __inline void
_cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
{
	/*
	 * Already in an unresolved state, nothing to do.
	 */
	if (ncp->nc_flag & NCF_UNRESOLVED)
		return;

	/*
	 * Try to zap entries that have timed out.  We have
	 * to be careful here because locked leaves may depend
	 * on the vnode remaining intact in a parent, so only
	 * do this under very specific conditions.
	 */
	if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
	    TAILQ_EMPTY(&ncp->nc_list)) {
		_cache_setunresolved(ncp);
		return;
	}

	/*
	 * If a resolved negative cache hit is invalid due to
	 * the mount's namecache generation being bumped, zap it.
	 */
	if (ncp->nc_vp == NULL &&
	    ncp->nc_namecache_gen != mp->mnt_namecache_gen) {
		_cache_setunresolved(ncp);
		return;
	}
}
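
/*
 * Editor's note (illustrative, not in the original source): the timeout
 * test above is written as a signed difference rather than a direct
 * comparison so that it stays correct when the global tick counter
 * wraps, e.g.:
 *
 *	(int)(ncp->nc_timeout - ticks) < 0	expired, even across wrap
 *	ncp->nc_timeout < ticks			breaks near the wrap point
 *
 * cache_settimeout() above nudges an exact-zero deadline to 1 because
 * nc_timeout == 0 doubles as "no timeout set".
 */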
/*
 * MPSAFE
 */
void
cache_setunresolved(struct nchandle *nch)
{
	_cache_setunresolved(nch->ncp);
}

/*
 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
 * looking for matches.  This flag tells the lookup code when it must
 * check for a mount linkage and also prevents the directories in question
 * from being deleted or renamed.
 *
 * MPSAFE
 */
static
int
cache_clrmountpt_callback(struct mount *mp, void *data)
{
	struct nchandle *nch = data;

	if (mp->mnt_ncmounton.ncp == nch->ncp)
		return(1);
	if (mp->mnt_ncmountpt.ncp == nch->ncp)
		return(1);
	return(0);
}

/*
 * MPSAFE
 */
void
cache_clrmountpt(struct nchandle *nch)
{
	int count;

	count = mountlist_scan(cache_clrmountpt_callback, nch,
			       MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
	if (count == 0)
		nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
}
/*
 * Invalidate portions of the namecache topology given a starting entry.
 * The passed ncp is set to an unresolved state and:
 *
 * The passed ncp must be referenced and locked.  The routine may unlock
 * and relock ncp several times, and will recheck the children and loop
 * to catch races.  When done the passed ncp will be returned with the
 * reference and lock intact.
 *
 * CINV_DESTROY		- Set a flag in the passed ncp entry indicating
 *			  that the physical underlying nodes have been
 *			  destroyed... as in deleted.  For example, when
 *			  a directory is removed.  This will cause record
 *			  lookups on the name to no longer be able to find
 *			  the record and tells the resolver to return failure
 *			  rather than trying to resolve through the parent.
 *
 *			  The topology itself, including ncp->nc_name,
 *			  remains intact.
 *
 *			  This only applies to the passed ncp, if CINV_CHILDREN
 *			  is specified the children are not flagged.
 *
 * CINV_CHILDREN	- Set all children (recursively) to an unresolved
 *			  state as well.
 *
 *			  Note that this will also have the side effect of
 *			  cleaning out any unreferenced nodes in the topology
 *			  from the leaves up as the recursion backs out.
 *
 * Note that the topology for any referenced nodes remains intact, but
 * the nodes will be marked as having been destroyed and will be set
 * to an unresolved state.
 *
 * It is possible for cache_inval() to race a cache_resolve(), meaning that
 * the namecache entry may not actually be invalidated on return if it was
 * revalidated while recursing down into its children.  This code guarantees
 * that the node(s) will go through an invalidation cycle, but does not
 * guarantee that they will remain in an invalidated state.
 *
 * Returns non-zero if a revalidation was detected during the invalidation
 * recursion, zero otherwise.  Note that since only the original ncp is
 * locked the revalidation ultimately can only indicate that the original ncp
 * *MIGHT* have been re-resolved.
 *
 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
 * have to avoid blowing out the kernel stack.  We do this by saving the
 * deep namecache node and aborting the recursion, then re-recursing at that
 * node using a depth-first algorithm in order to allow multiple deep
 * recursions to chain through each other, then we restart the invalidation
 * from scratch.
 *
 * MPSAFE
 */
struct cinvtrack {
	struct namecache *resume_ncp;
	int depth;
};

static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);

static
int
_cache_inval(struct namecache *ncp, int flags)
{
	struct cinvtrack track;
	struct namecache *ncp2;
	int r;

	track.depth = 0;
	track.resume_ncp = NULL;

	for (;;) {
		r = _cache_inval_internal(ncp, flags, &track);
		if (track.resume_ncp == NULL)
			break;
		kprintf("Warning: deep namecache recursion at %s\n",
			ncp->nc_name);
		_cache_unlock(ncp);
		while ((ncp2 = track.resume_ncp) != NULL) {
			track.resume_ncp = NULL;
			_cache_lock(ncp2);
			_cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
					      &track);
			_cache_put(ncp2);
		}
		_cache_lock(ncp);
	}
	return(r);
}

int
cache_inval(struct nchandle *nch, int flags)
{
	return(_cache_inval(nch->ncp, flags));
}

/*
 * Helper for _cache_inval().  The passed ncp is refd and locked and
 * remains that way on return, but may be unlocked/relocked multiple
 * times by the routine.
 */
static int
_cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
{
	struct namecache *kid;
	struct namecache *nextkid;
	int rcnt = 0;

	KKASSERT(ncp->nc_exlocks);

	_cache_setunresolved(ncp);
	if (flags & CINV_DESTROY)
		ncp->nc_flag |= NCF_DESTROYED;
	if ((flags & CINV_CHILDREN) &&
	    (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
	) {
		_cache_hold(kid);
		if (++track->depth > MAX_RECURSION_DEPTH) {
			track->resume_ncp = ncp;
			_cache_hold(ncp);
			++rcnt;
		}
		_cache_unlock(ncp);
		while (kid) {
			if (track->resume_ncp) {
				_cache_drop(kid);
				break;
			}
			if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
				_cache_hold(nextkid);
			if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
			    TAILQ_FIRST(&kid->nc_list)
			) {
				_cache_lock(kid);
				rcnt += _cache_inval_internal(kid, flags & ~CINV_DESTROY, track);
				_cache_unlock(kid);
			}
			_cache_drop(kid);
			kid = nextkid;
		}
		--track->depth;
		_cache_lock(ncp);
	}

	/*
	 * Someone could have gotten in there while ncp was unlocked,
	 * retry if so.
	 */
	if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
		++rcnt;
	return (rcnt);
}
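
/*
 * Editor's note (illustrative, not in the original source): the
 * cinvtrack mechanism converts stack-deep recursion into iteration.
 * Once depth exceeds MAX_RECURSION_DEPTH (64) the deep node is parked
 * in track->resume_ncp, the recursion unwinds, and _cache_inval()'s
 * outer loop restarts depth-first from the parked node, repeating until
 * no further resume point is generated.
 */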
/*
 * Invalidate a vnode's namecache associations.  To avoid races against
 * the resolver we do not invalidate a node which we previously invalidated
 * but which was then re-resolved while we were in the invalidation loop.
 *
 * Returns non-zero if any namecache entries remain after the invalidation
 * loop completed.
 *
 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
 *	 be ripped out of the topology while held, the vnode's v_namecache
 *	 list has no such restriction.  NCP's can be ripped out of the list
 *	 at virtually any time if not locked, even if held.
 *
 *	 In addition, the v_namecache list itself must be locked via
 *	 the vnode's spinlock.
 *
 * MPSAFE
 */
int
cache_inval_vp(struct vnode *vp, int flags)
{
	struct namecache *ncp;
	struct namecache *next;

restart:
	spin_lock_wr(&vp->v_spinlock);
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held and vp spin-locked */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock_wr(&vp->v_spinlock);
		_cache_lock(ncp);
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto restart;
		}
		_cache_inval(ncp, flags);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock_wr(&vp->v_spinlock);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock_wr(&vp->v_spinlock);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto restart;
		}
	}
	spin_unlock_wr(&vp->v_spinlock);
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}
/*
 * This routine is used instead of the normal cache_inval_vp() when we
 * are trying to recycle otherwise good vnodes.
 *
 * Return 0 on success, non-zero if not all namecache records could be
 * disassociated from the vnode (for various reasons).
 *
 * MPSAFE
 */
int
cache_inval_vp_nonblock(struct vnode *vp)
{
	struct namecache *ncp;
	struct namecache *next;

	spin_lock_wr(&vp->v_spinlock);
	ncp = TAILQ_FIRST(&vp->v_namecache);
	if (ncp)
		_cache_hold(ncp);
	while (ncp) {
		/* loop entered with ncp held */
		if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
			_cache_hold(next);
		spin_unlock_wr(&vp->v_spinlock);
		if (_cache_lock_nonblock(ncp)) {
			_cache_drop(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		if (ncp->nc_vp != vp) {
			kprintf("Warning: cache_inval_vp: race-A detected on "
				"%s\n", ncp->nc_name);
			_cache_put(ncp);
			if (next)
				_cache_drop(next);
			goto done;
		}
		_cache_inval(ncp, 0);
		_cache_put(ncp);		/* also releases reference */
		ncp = next;
		spin_lock_wr(&vp->v_spinlock);
		if (ncp && ncp->nc_vp != vp) {
			spin_unlock_wr(&vp->v_spinlock);
			kprintf("Warning: cache_inval_vp: race-B detected on "
				"%s\n", ncp->nc_name);
			_cache_drop(ncp);
			goto done;
		}
	}
	spin_unlock_wr(&vp->v_spinlock);
done:
	return(TAILQ_FIRST(&vp->v_namecache) != NULL);
}
/*
 * The source ncp has been renamed to the target ncp.  Both fncp and tncp
 * must be locked.  The target ncp is destroyed (as a normal rename-over
 * would destroy the target file or directory).
 *
 * Because there may be references to the source ncp we cannot copy its
 * contents to the target.  Instead the source ncp is relinked as the target
 * and the target ncp is removed from the namecache topology.
 *
 * MPSAFE
 */
void
cache_rename(struct nchandle *fnch, struct nchandle *tnch)
{
	struct namecache *fncp = fnch->ncp;
	struct namecache *tncp = tnch->ncp;
	struct namecache *tncp_par;
	struct nchash_head *nchpp;
	u_int32_t hash;
	char *oname;

	/*
	 * Rename fncp (unlink)
	 */
	_cache_unlink_parent(fncp);
	oname = fncp->nc_name;
	fncp->nc_name = tncp->nc_name;
	fncp->nc_nlen = tncp->nc_nlen;
	tncp_par = tncp->nc_parent;
	_cache_hold(tncp_par);
	_cache_lock(tncp_par);

	/*
	 * Rename fncp (relink)
	 */
	hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT);
	hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash);
	nchpp = NCHHASH(hash);

	spin_lock_wr(&nchpp->spin);
	_cache_link_parent(fncp, tncp_par, nchpp);
	spin_unlock_wr(&nchpp->spin);

	_cache_put(tncp_par);

	/*
	 * Get rid of the overwritten tncp (unlink)
	 */
	_cache_setunresolved(tncp);
	_cache_unlink_parent(tncp);
	tncp->nc_name = NULL;
	tncp->nc_nlen = 0;

	if (oname)
		kfree(oname, M_VFSCACHE);
}
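
/*
 * Editor's note (illustrative, not in the original source): the two
 * chained fnv_32_buf() calls above derive the hash-chain key from the
 * (name, parent) pair:
 *
 *	hash = fnv_32_buf(name, nlen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&parent, sizeof(parent), hash);
 *	nchpp = NCHHASH(hash);
 *
 * Because the parent pointer is part of the key, a rename cannot be done
 * in place: the entry must be unhashed and relinked under the new
 * parent's chain, which is exactly what this function does.
 */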
/*
 * vget the vnode associated with the namecache entry.  Resolve the namecache
 * entry if necessary.  The passed ncp must be referenced and locked.
 *
 * lk_type may be LK_SHARED, LK_EXCLUSIVE.  A ref'd, possibly locked
 * (depending on the passed lk_type) vnode will be returned in *vpp with an
 * error of 0, or NULL will be returned in *vpp with a non-0 error code.
 * The most typical error is ENOENT, meaning that the ncp represents a
 * negative cache hit and there is no vnode to retrieve, but other errors
 * can occur too.
 *
 * The vget() can race a reclaim.  If this occurs we re-resolve the
 * namecache entry.
 *
 * There are numerous places in the kernel where vget() is called on a
 * vnode while one or more of its namecache entries is locked.  Releasing
 * a vnode never deadlocks against locked namecache entries (the vnode
 * will not get recycled while referenced ncp's exist).  This means we
 * can safely acquire the vnode.  In fact, we MUST NOT release the ncp
 * lock when acquiring the vp lock or we might cause a deadlock.
 *
 * MPSAFE
 */
int
cache_vget(struct nchandle *nch, struct ucred *cred,
	   int lk_type, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
	KKASSERT(ncp->nc_locktd == curthread);
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, cred);
	else
		error = 0;

	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, lk_type);
		if (error) {
			/*
			 * VRECLAIM race
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vget on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_setunresolved(ncp);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
int
cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
{
	struct namecache *ncp;
	struct vnode *vp;
	int error;

	ncp = nch->ncp;
	KKASSERT(ncp->nc_locktd == curthread);
again:
	vp = NULL;
	if (ncp->nc_flag & NCF_UNRESOLVED)
		error = cache_resolve(nch, cred);
	else
		error = 0;

	if (error == 0 && (vp = ncp->nc_vp) != NULL) {
		error = vget(vp, LK_SHARED);
		if (error) {
			/*
			 * VRECLAIM race
			 */
			if (error == ENOENT) {
				kprintf("Warning: vnode reclaim race detected "
					"in cache_vref on %p (%s)\n",
					vp, ncp->nc_name);
				_cache_setunresolved(ncp);
				goto again;
			}

			/*
			 * Not a reclaim race, some other error.
			 */
			KKASSERT(ncp->nc_vp == vp);
			vp = NULL;
		} else {
			KKASSERT(ncp->nc_vp == vp);
			KKASSERT((vp->v_flag & VRECLAIMED) == 0);
			/* caller does not want a lock */
			vn_unlock(vp);
		}
	}
	if (error == 0 && vp == NULL)
		error = ENOENT;
	*vpp = vp;
	return(error);
}
/*
 * Return a referenced vnode representing the parent directory of
 * ncp.
 *
 * Because the caller has locked the ncp it should not be possible for
 * the parent ncp to go away.  However, the parent can unresolve its
 * dvp at any time so we must be able to acquire a lock on the parent
 * to safely access nc_vp.
 *
 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
 * so use vhold()/vdrop() while holding the lock to prevent dvp from
 * getting destroyed.
 *
 * MPSAFE - Note vhold() is allowed when dvp has 0 refs if we hold a
 *	    lock on the ncp in question.
 */
static struct vnode *
cache_dvpref(struct namecache *ncp)
{
	struct namecache *par;
	struct vnode *dvp;

	dvp = NULL;
	if ((par = ncp->nc_parent) != NULL) {
		_cache_hold(par);
		_cache_lock(par);
		if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
			if ((dvp = par->nc_vp) != NULL)
				vhold(dvp);
		}
		_cache_unlock(par);
		if (dvp) {
			if (vget(dvp, LK_SHARED) == 0) {
				vn_unlock(dvp);
				vdrop(dvp);
				/* return refd, unlocked dvp */
			} else {
				vdrop(dvp);
				dvp = NULL;
			}
		}
		_cache_drop(par);
	}
	return(dvp);
}
/*
 * Convert a directory vnode to a namecache record without any other
 * knowledge of the topology.  This ONLY works with directory vnodes and
 * is ONLY used by the NFS server.  dvp must be refd but unlocked, and the
 * returned ncp (if not NULL) will be held and unlocked.
 *
 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
 * for dvp.  This will fail only if the directory has been deleted out from
 * under the caller.
 *
 * Callers must always check for a NULL return no matter the value of 'makeit'.
 *
 * To avoid underflowing the kernel stack each recursive call increments
 * the makeit variable.
 */
static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
				  struct vnode *dvp, char *fakename);
static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
			     struct vnode **saved_dvp);

int
cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
	      struct nchandle *nch)
{
	struct vnode *saved_dvp;
	struct vnode *pvp;
	char *fakename;
	int error;

	nch->ncp = NULL;
	nch->mount = dvp->v_mount;
	saved_dvp = NULL;
	fakename = NULL;

	/*
	 * Handle the makeit == 0 degenerate case
	 */
	if (makeit == 0) {
		spin_lock_wr(&dvp->v_spinlock);
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		if (nch->ncp)
			cache_hold(nch);
		spin_unlock_wr(&dvp->v_spinlock);
	}

	/*
	 * Loop until resolution, inside code will break out on error.
	 */
	while (makeit) {
		/*
		 * Break out if we successfully acquire a working ncp.
		 */
		spin_lock_wr(&dvp->v_spinlock);
		nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
		if (nch->ncp) {
			cache_hold(nch);
			spin_unlock_wr(&dvp->v_spinlock);
			break;
		}
		spin_unlock_wr(&dvp->v_spinlock);

		/*
		 * If dvp is the root of its filesystem it should already
		 * have a namecache pointer associated with it as a side
		 * effect of the mount, but it may have been disassociated.
		 */
		if (dvp->v_flag & VROOT) {
			nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch->mount);
			_cache_put(nch->ncp);
			if (ncvp_debug) {
				kprintf("cache_fromdvp: resolve root of mount %p error %d",
					dvp->v_mount, error);
			}
			if (error) {
				if (ncvp_debug)
					kprintf(" failed\n");
				nch->ncp = NULL;
				break;
			}
			if (ncvp_debug)
				kprintf(" succeeded\n");
			continue;
		}

		/*
		 * If we are recursed too deeply resort to an O(n^2)
		 * algorithm to resolve the namecache topology.  The
		 * resolved pvp is left referenced in saved_dvp to
		 * prevent the tree from being destroyed while we loop.
		 */
		if (makeit > 20) {
			error = cache_fromdvp_try(dvp, cred, &saved_dvp);
			if (error) {
				kprintf("lookupdotdot(longpath) failed %d "
					"dvp %p\n", error, dvp);
				nch->ncp = NULL;
				break;
			}
			continue;
		}

		/*
		 * Get the parent directory and resolve its ncp.
		 */
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
			break;
		}
		vn_unlock(pvp);

		/*
		 * Reuse makeit as a recursion depth counter.  On success
		 * nch will be fully referenced.
		 */
		cache_fromdvp(pvp, cred, makeit + 1, nch);
		vrele(pvp);
		if (nch->ncp == NULL)
			break;

		/*
		 * Do an inefficient scan of pvp (embodied by ncp) to look
		 * for dvp.  This will create a namecache record for dvp on
		 * success.  We loop up to recheck on success.
		 *
		 * ncp and dvp are both held but not locked.
		 */
		error = cache_inefficient_scan(nch, cred, dvp, fakename);
		if (error) {
			kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
				pvp, nch->ncp->nc_name, dvp);
			cache_drop(nch);
			/* nch was NULLed out, reload mount */
			nch->mount = dvp->v_mount;
			break;
		}
		if (ncvp_debug) {
			kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
				pvp, nch->ncp->nc_name);
		}
		cache_drop(nch);
		/* nch was NULLed out, reload mount */
		nch->mount = dvp->v_mount;
	}

	/*
	 * If nch->ncp is non-NULL it will have been held already.
	 */
	if (fakename)
		kfree(fakename, M_TEMP);
	if (saved_dvp)
		vrele(saved_dvp);
	if (nch->ncp)
		return (0);
	return (EINVAL);
}
/*
 * Go up the chain of parent directories until we find something
 * we can resolve into the namecache.  This is very inefficient.
 */
static
int
cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
		  struct vnode **saved_dvp)
{
	struct nchandle nch;
	struct vnode *pvp;
	int error;
	static time_t last_fromdvp_report;
	char *fakename;

	/*
	 * Loop getting the parent directory vnode until we get something we
	 * can resolve in the namecache.
	 */
	vref(dvp);
	nch.mount = dvp->v_mount;
	nch.ncp = NULL;
	fakename = NULL;

	for (;;) {
		if (fakename) {
			kfree(fakename, M_TEMP);
			fakename = NULL;
		}
		error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
					  &fakename);
		if (error) {
			vrele(dvp);
			break;
		}
		vn_unlock(pvp);
		spin_lock_wr(&pvp->v_spinlock);
		if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
			_cache_hold(nch.ncp);
			spin_unlock_wr(&pvp->v_spinlock);
			vrele(pvp);
			break;
		}
		spin_unlock_wr(&pvp->v_spinlock);
		if (pvp->v_flag & VROOT) {
			nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
			error = cache_resolve_mp(nch.mount);
			_cache_unlock(nch.ncp);
			vrele(pvp);
			if (error) {
				_cache_drop(nch.ncp);
				nch.ncp = NULL;
				vrele(dvp);
			}
			break;
		}
		vrele(dvp);
		dvp = pvp;
	}
	if (error == 0) {
		if (last_fromdvp_report != time_second) {
			last_fromdvp_report = time_second;
			kprintf("Warning: extremely inefficient path "
				"resolution on %s\n",
				nch.ncp->nc_name);
		}
		error = cache_inefficient_scan(&nch, cred, dvp, fakename);

		/*
		 * Hopefully dvp now has a namecache record associated with
		 * it.  Leave it referenced to prevent the kernel from
		 * recycling the vnode.  Otherwise extremely long directory
		 * paths could result in endless recycling.
		 */
		if (*saved_dvp)
			vrele(*saved_dvp);
		*saved_dvp = dvp;
		_cache_drop(nch.ncp);
	}
	if (fakename)
		kfree(fakename, M_TEMP);
	return (error);
}
/*
 * Do an inefficient scan of the directory represented by ncp looking for
 * the directory vnode dvp.  ncp must be held but not locked on entry and
 * will be held on return.  dvp must be refd but not locked on entry and
 * will remain refd on return.
 *
 * Why do this at all?  Well, due to its stateless nature the NFS server
 * converts file handles directly to vnodes without necessarily going through
 * the namecache ops that would otherwise create the namecache topology
 * leading to the vnode.  We could either (1) Change the namecache algorithms
 * to allow disconnected namecache records that are re-merged
 * opportunistically, or (2) Make the NFS server backtrack and scan to recover
 * a connected namecache topology in order to then be able to issue new API
 * lookups.
 *
 * It turns out that (1) is a huge mess.  It takes a nice clean set of
 * namecache algorithms and introduces a lot of complication in every subsystem
 * that calls into the namecache to deal with the re-merge case, especially
 * since we are using the namecache to placehold negative lookups and the
 * vnode might not be immediately assigned.  (2) is certainly far less
 * efficient than (1), but since we are only talking about directories here
 * (which are likely to remain cached), the case does not actually run all
 * that often and has the supreme advantage of not polluting the namecache
 * algorithms.
 *
 * If a fakename is supplied just construct a namecache entry using the
 * fake name.
 */
static int
cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
		       struct vnode *dvp, char *fakename)
{
	struct nlcomponent nlc;
	struct nchandle rncp;
	struct dirent *den;
	struct vnode *pvp;
	struct vattr vat;
	struct iovec iov;
	struct uio uio;
	int blksize;
	int eofflag;
	int bytes;
	char *rbuf;
	int error;

	vat.va_blocksize = 0;
	if ((error = VOP_GETATTR(dvp, &vat)) != 0)
		return (error);
	cache_lock(nch);
	error = cache_vref(nch, cred, &pvp);
	cache_unlock(nch);
	if (error)
		return (error);
	if (ncvp_debug) {
		kprintf("inefficient_scan: directory iosize %ld "
			"vattr fileid = %lld\n",
			vat.va_blocksize,
			(long long)vat.va_fileid);
	}

	/*
	 * Use the supplied fakename if not NULL.  Fake names are typically
	 * not in the actual filesystem hierarchy.  This is used by HAMMER
	 * to glue @@timestamp recursions together.
	 */
	if (fakename) {
		nlc.nlc_nameptr = fakename;
		nlc.nlc_namelen = strlen(fakename);
		rncp = cache_nlookup(nch, &nlc);
		goto done;
	}

	if ((blksize = vat.va_blocksize) == 0)
		blksize = DEV_BSIZE;
	rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
	rncp.ncp = NULL;

	eofflag = 0;
	uio.uio_offset = 0;
again:
	iov.iov_base = rbuf;
	iov.iov_len = blksize;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = blksize;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = curthread;

	if (ncvp_debug >= 2)
		kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
	error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
	if (error == 0) {
		den = (struct dirent *)rbuf;
		bytes = blksize - uio.uio_resid;

		while (bytes > 0) {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: %*.*s\n",
					den->d_namlen, den->d_namlen,
					den->d_name);
			}
			if (den->d_type != DT_WHT &&
			    den->d_ino == vat.va_fileid) {
				if (ncvp_debug) {
					kprintf("cache_inefficient_scan: "
						"MATCHED inode %lld path %s/%*.*s\n",
						(long long)vat.va_fileid,
						nch->ncp->nc_name,
						den->d_namlen, den->d_namlen,
						den->d_name);
				}
				nlc.nlc_nameptr = den->d_name;
				nlc.nlc_namelen = den->d_namlen;
				rncp = cache_nlookup(nch, &nlc);
				KKASSERT(rncp.ncp != NULL);
				break;
			}
			bytes -= _DIRENT_DIRSIZ(den);
			den = _DIRENT_NEXT(den);
		}
		if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
			goto again;
	}
	kfree(rbuf, M_TEMP);
done:
	vrele(pvp);
	if (rncp.ncp) {
		if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
			_cache_setvp(rncp.mount, rncp.ncp, dvp);
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
			}
		} else {
			if (ncvp_debug >= 2) {
				kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
					nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
					rncp.ncp->nc_vp);
			}
		}
		if (rncp.ncp->nc_vp == NULL)
			error = rncp.ncp->nc_error;
		/*
		 * Release rncp after a successful nlookup.  rncp was fully
		 * referenced.
		 */
		cache_put(&rncp);
	} else {
		kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
			dvp, nch->ncp->nc_name);
		error = ENOENT;
	}
	return (error);
}
1963 * Zap a namecache entry. The ncp is unconditionally set to an unresolved
1964 * state, which disassociates it from its vnode or ncneglist.
1966 * Then, if there are no additional references to the ncp and no children,
1967 * the ncp is removed from the topology and destroyed.
1969 * References and/or children may exist if the ncp is in the middle of the
1970 * topology, preventing the ncp from being destroyed.
1972 * This function must be called with the ncp held and locked and will unlock
1973 * and drop it during zapping.
1975 * If nonblock is non-zero and the parent ncp cannot be locked we give up.
1976 * This case can occur in the cache_drop() path.
1978 * This function may returned a held (but NOT locked) parent node which the
1979 * caller must drop. We do this so _cache_drop() can loop, to avoid
1980 * blowing out the kernel stack.
1982 * WARNING! For MPSAFE operation this routine must acquire up to three
1983 * spin locks to be able to safely test nc_refs. Lock order is
1984 * very important.
1986 * hash spinlock if on hash list
1987 * parent spinlock if child of parent
1988 * (the ncp is unresolved so there is no vnode association)
1990 static struct namecache *
1991 cache_zap(struct namecache *ncp, int nonblock)
1993 struct namecache *par;
1994 struct vnode *dropvp;
1995 int refs;
1998 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
2000 _cache_setunresolved(ncp);
2003 * Try to scrap the entry and possibly tail-recurse on its parent.
2004 * We only scrap unref'd (other then our ref) unresolved entries,
2005 * we do not scrap 'live' entries.
2007 * Note that once the spinlocks are acquired if nc_refs == 1 no
2008 * other references are possible. If it isn't, however, we have
2009 * to decrement but also be sure to avoid a 1->0 transition.
2011 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
2012 KKASSERT(ncp->nc_refs > 0);
2015 * Acquire locks. Note that the parent can't go away while we hold
2016 * a child locked.
2018 if ((par = ncp->nc_parent) != NULL) {
2019 if (nonblock) {
2020 for (;;) {
2021 if (_cache_lock_nonblock(par) == 0)
2022 break;
2023 refs = ncp->nc_refs;
2024 ncp->nc_flag |= NCF_DEFEREDZAP;
2025 ++numdefered; /* MP race ok */
2026 if (atomic_cmpset_int(&ncp->nc_refs,
2027 refs, refs - 1)) {
2028 _cache_unlock(ncp);
2029 return(NULL);
2031 cpu_pause();
2033 _cache_hold(par);
2034 } else {
2035 _cache_hold(par);
2036 _cache_lock(par);
2038 spin_lock_wr(&ncp->nc_head->spin);
2042 * If someone other then us has a ref or we have children
2043 * we cannot zap the entry. The 1->0 transition and any
2044 * further list operation is protected by the spinlocks
2045 * we have acquired but other transitions are not.
2047 for (;;) {
2048 refs = ncp->nc_refs;
2049 if (refs == 1 && TAILQ_EMPTY(&ncp->nc_list))
2050 break;
2051 if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) {
2052 if (par) {
2053 spin_unlock_wr(&ncp->nc_head->spin);
2054 _cache_put(par);
2055 }
2056 _cache_unlock(ncp);
2057 return(NULL);
2058 }
2059 cpu_pause();
2060 }
2063 * We are the only ref and with the spinlocks held no further
2064 * refs can be acquired by others.
2066 * Remove us from the hash list and parent list. We have to
2067 * drop a ref on the parent's vp if the parent's list becomes
2068 * empty.
2070 dropvp = NULL;
2071 if (par) {
2072 struct nchash_head *nchpp = ncp->nc_head;
2074 KKASSERT(nchpp != NULL);
2075 LIST_REMOVE(ncp, nc_hash);
2076 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
2077 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
2078 dropvp = par->nc_vp;
2079 ncp->nc_head = NULL;
2080 ncp->nc_parent = NULL;
2081 spin_unlock_wr(&nchpp->spin);
2082 _cache_unlock(par);
2083 } else {
2084 KKASSERT(ncp->nc_head == NULL);
2085 }
2088 * ncp should not have picked up any refs. Physically
2089 * destroy the ncp.
2091 KKASSERT(ncp->nc_refs == 1);
2092 /* _cache_unlock(ncp) not required */
2093 ncp->nc_refs = -1; /* safety */
2094 if (ncp->nc_name)
2095 kfree(ncp->nc_name, M_VFSCACHE);
2096 kfree(ncp, M_VFSCACHE);
2098 /*
2099 * Delayed drop (we had to release our spinlocks)
2101 * The refed parent (if not NULL) must be dropped. The
2102 * caller is responsible for looping.
2103 */
2104 if (dropvp)
2105 vdrop(dropvp);
2106 return(par);
2107 }
2109 /*
2110 * Clean up dangling negative cache and deferred-drop entries in the
2111 * namecache.
2112 */
2113 static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;
2115 void
2116 cache_hysteresis(void)
2117 {
2118 /*
2119 * Don't cache too many negative hits. We use hysteresis to reduce
2120 * the impact on the critical path.
2121 */
2122 switch(cache_hysteresis_state) {
2123 case CHI_LOW:
2124 if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
2125 _cache_cleanneg(10);
2126 cache_hysteresis_state = CHI_HIGH;
2127 }
2128 break;
2129 case CHI_HIGH:
2130 if (numneg > MINNEG * 9 / 10 &&
2131 numneg * ncnegfactor * 9 / 10 > numcache
2132 ) {
2133 _cache_cleanneg(10);
2134 } else {
2135 cache_hysteresis_state = CHI_LOW;
2136 }
2137 break;
2138 }
2140 /*
2141 * Clean out dangling deferred-zap ncps which could not
2142 * be cleanly dropped if too many build up. Note
2143 * that numdefered is not an exact number as such ncps
2144 * can be reused and the counter is not handled in an
2145 * MP-safe manner by design.
2146 */
2147 if (numdefered * ncnegfactor > numcache) {
2148 _cache_cleandefered();
2149 }
2150 }
2153 * NEW NAMECACHE LOOKUP API
2155 * Lookup an entry in the namecache. The passed par_nch must be referenced
2156 * and unlocked. A referenced and locked nchandle with a non-NULL nch.ncp
2157 * is ALWAYS returned, even if the supplied component is illegal.
2159 * The resulting namecache entry should be returned to the system with
2160 * cache_put() or cache_unlock() + cache_drop().
2162 * namecache locks are recursive but care must be taken to avoid lock order
2163 * reversals (hence why the passed par_nch must be unlocked). Lock ordering
2164 * follows parent traversals, not child traversals.
2166 * Nobody else will be able to manipulate the associated namespace (e.g.
2167 * create, delete, rename, rename-target) until the caller unlocks the
2168 * entry.
2170 * The returned entry will be in one of three states: positive hit (non-null
2171 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
2172 * Unresolved entries must be resolved through the filesystem to associate the
2173 * vnode and/or determine whether a positive or negative hit has occurred.
2175 * It is not necessary to lock a directory in order to lock namespace under
2176 * that directory. In fact, it is explicitly not allowed to do that. A
2177 * directory is typically only locked when being created, renamed, or
2178 * destroyed.
2180 * The directory (par) may be unresolved, in which case any returned child
2181 * will likely also be marked unresolved. Likely but not guaranteed. Since
2182 * the filesystem lookup requires a resolved directory vnode the caller is
2183 * responsible for resolving the namecache chain top-down. This API
2184 * specifically allows whole chains to be created in an unresolved state.
2185 */
2186 struct nchandle
2187 cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
2188 {
2189 struct nchandle nch;
2190 struct namecache *ncp;
2191 struct namecache *new_ncp;
2192 struct nchash_head *nchpp;
2193 struct mount *mp;
2194 u_int32_t hash;
2195 globaldata_t gd;
2196 int par_locked;
2198 numcalls++;
2199 gd = mycpu;
2200 mp = par_nch->mount;
2201 par_locked = 0;
2204 * This is a good time to call it, no ncp's are locked by
2205 * the caller or us.
2207 cache_hysteresis();
2210 * Try to locate an existing entry
2212 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
2213 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
2214 new_ncp = NULL;
2215 nchpp = NCHHASH(hash);
2216 restart:
2217 spin_lock_wr(&nchpp->spin);
2218 LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
2219 numchecks++;
2222 * Break out if we find a matching entry. Note that
2223 * UNRESOLVED entries may match, but DESTROYED entries
2224 * do not.
2226 if (ncp->nc_parent == par_nch->ncp &&
2227 ncp->nc_nlen == nlc->nlc_namelen &&
2228 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
2229 (ncp->nc_flag & NCF_DESTROYED) == 0
2230 ) {
2231 _cache_hold(ncp);
2232 spin_unlock_wr(&nchpp->spin);
2233 if (par_locked) {
2234 _cache_unlock(par_nch->ncp);
2235 par_locked = 0;
2236 }
2237 if (_cache_lock_special(ncp) == 0) {
2238 _cache_auto_unresolve(mp, ncp);
2239 if (new_ncp)
2240 _cache_free(new_ncp);
2241 goto found;
2242 }
2243 _cache_get(ncp);
2244 _cache_put(ncp);
2245 _cache_drop(ncp);
2246 goto restart;
2247 }
2248 }
2250 /*
2251 * We failed to locate an entry, create a new entry and add it to
2252 * the cache. The parent ncp must also be locked so we
2253 * can link into it.
2255 * We have to relookup after possibly blocking in kmalloc or
2256 * when locking par_nch.
2258 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
2259 * mount case, in which case nc_name will be NULL.
2261 if (new_ncp == NULL) {
2262 spin_unlock_wr(&nchpp->spin);
2263 new_ncp = cache_alloc(nlc->nlc_namelen);
2264 if (nlc->nlc_namelen) {
2265 bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
2266 nlc->nlc_namelen);
2267 new_ncp->nc_name[nlc->nlc_namelen] = 0;
2268 }
2269 goto restart;
2270 }
2271 if (par_locked == 0) {
2272 spin_unlock_wr(&nchpp->spin);
2273 _cache_lock(par_nch->ncp);
2274 par_locked = 1;
2275 goto restart;
2276 }
2278 /*
2279 * WARNING! We still hold the spinlock. We have to set the hash
2280 * table entry atomically.
2282 ncp = new_ncp;
2283 _cache_link_parent(ncp, par_nch->ncp, nchpp);
2284 spin_unlock_wr(&nchpp->spin);
2285 _cache_unlock(par_nch->ncp);
2286 /* par_locked = 0 - not used */
2287 found:
2289 * stats and namecache size management
2291 if (ncp->nc_flag & NCF_UNRESOLVED)
2292 ++gd->gd_nchstats->ncs_miss;
2293 else if (ncp->nc_vp)
2294 ++gd->gd_nchstats->ncs_goodhits;
2295 else
2296 ++gd->gd_nchstats->ncs_neghits;
2297 nch.mount = mp;
2298 nch.ncp = ncp;
2299 atomic_add_int(&nch.mount->mnt_refs, 1);
2300 return(nch);
2301 }
2303 /*
2304 * This is a non-blocking version of cache_nlookup() used by
2305 * nfs_readdirplusrpc_uio(). It can fail for any reason and
2306 * will return nch.ncp == NULL in that case.
2307 */
2308 struct nchandle
2309 cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
2310 {
2311 struct nchandle nch;
2312 struct namecache *ncp;
2313 struct namecache *new_ncp;
2314 struct nchash_head *nchpp;
2315 struct mount *mp;
2316 u_int32_t hash;
2317 globaldata_t gd;
2318 int par_locked;
2320 numcalls++;
2321 gd = mycpu;
2322 mp = par_nch->mount;
2323 par_locked = 0;
2326 * Try to locate an existing entry
2328 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
2329 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
2330 new_ncp = NULL;
2331 nchpp = NCHHASH(hash);
2332 restart:
2333 spin_lock_wr(&nchpp->spin);
2334 LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
2335 numchecks++;
2338 * Break out if we find a matching entry. Note that
2339 * UNRESOLVED entries may match, but DESTROYED entries
2340 * do not.
2342 if (ncp->nc_parent == par_nch->ncp &&
2343 ncp->nc_nlen == nlc->nlc_namelen &&
2344 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
2345 (ncp->nc_flag & NCF_DESTROYED) == 0
2346 ) {
2347 _cache_hold(ncp);
2348 spin_unlock_wr(&nchpp->spin);
2349 if (par_locked) {
2350 _cache_unlock(par_nch->ncp);
2351 par_locked = 0;
2352 }
2353 if (_cache_lock_special(ncp) == 0) {
2354 _cache_auto_unresolve(mp, ncp);
2355 if (new_ncp) {
2356 _cache_free(new_ncp);
2357 new_ncp = NULL;
2358 }
2359 goto found;
2361 _cache_drop(ncp);
2362 goto failed;
2363 }
2364 }
2366 /*
2367 * We failed to locate an entry, create a new entry and add it to
2368 * the cache. The parent ncp must also be locked so we
2369 * can link into it.
2371 * We have to relookup after possibly blocking in kmalloc or
2372 * when locking par_nch.
2374 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
2375 * mount case, in which case nc_name will be NULL.
2377 if (new_ncp == NULL) {
2378 spin_unlock_wr(&nchpp->spin);
2379 new_ncp = cache_alloc(nlc->nlc_namelen);
2380 if (nlc->nlc_namelen) {
2381 bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
2382 nlc->nlc_namelen);
2383 new_ncp->nc_name[nlc->nlc_namelen] = 0;
2384 }
2385 goto restart;
2386 }
2387 if (par_locked == 0) {
2388 spin_unlock_wr(&nchpp->spin);
2389 if (_cache_lock_nonblock(par_nch->ncp) == 0) {
2390 par_locked = 1;
2391 goto restart;
2392 }
2393 goto failed;
2394 }
2396 /*
2397 * WARNING! We still hold the spinlock. We have to set the hash
2398 * table entry atomically.
2400 ncp = new_ncp;
2401 _cache_link_parent(ncp, par_nch->ncp, nchpp);
2402 spin_unlock_wr(&nchpp->spin);
2403 _cache_unlock(par_nch->ncp);
2404 /* par_locked = 0 - not used */
2405 found:
2407 * stats and namecache size management
2409 if (ncp->nc_flag & NCF_UNRESOLVED)
2410 ++gd->gd_nchstats->ncs_miss;
2411 else if (ncp->nc_vp)
2412 ++gd->gd_nchstats->ncs_goodhits;
2413 else
2414 ++gd->gd_nchstats->ncs_neghits;
2415 nch.mount = mp;
2416 nch.ncp = ncp;
2417 atomic_add_int(&nch.mount->mnt_refs, 1);
2418 return(nch);
2419 failed:
2420 if (new_ncp) {
2421 _cache_free(new_ncp);
2422 new_ncp = NULL;
2423 }
2424 nch.mount = NULL;
2425 nch.ncp = NULL;
2426 return(nch);
2427 }
2429 /*
2430 * The namecache entry is marked as being used as a mount point.
2431 * Locate the mount if it is visible to the caller.
2433 struct findmount_info {
2434 struct mount *result;
2435 struct mount *nch_mount;
2436 struct namecache *nch_ncp;
2437 };
2439 static
2440 int
2441 cache_findmount_callback(struct mount *mp, void *data)
2442 {
2443 struct findmount_info *info = data;
2446 * Check the mount's mounted-on point against the passed nch.
2448 if (mp->mnt_ncmounton.mount == info->nch_mount &&
2449 mp->mnt_ncmounton.ncp == info->nch_ncp
2450 ) {
2451 info->result = mp;
2452 return(-1);
2453 }
2454 return(0);
2455 }
2457 struct mount *
2458 cache_findmount(struct nchandle *nch)
2459 {
2460 struct findmount_info info;
2462 info.result = NULL;
2463 info.nch_mount = nch->mount;
2464 info.nch_ncp = nch->ncp;
2465 mountlist_scan(cache_findmount_callback, &info,
2466 MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
2467 return(info.result);
2468 }
2470 /*
2471 * Resolve an unresolved namecache entry, generally by looking it up.
2472 * The passed ncp must be locked and refd.
2474 * Theoretically since a vnode cannot be recycled while held, and since
2475 * the nc_parent chain holds its vnode as long as children exist, the
2476 * direct parent of the cache entry we are trying to resolve should
2477 * have a valid vnode. If not then generate an error that we can
2478 * determine is related to a resolver bug.
2480 * However, if a vnode was in the middle of a recyclement when the NCP
2481 * got locked, ncp->nc_vp might point to a vnode that is about to become
2482 * invalid. cache_resolve() handles this case by unresolving the entry
2483 * and then re-resolving it.
2485 * Note that successful resolution does not necessarily return an error
2486 * code of 0. If the ncp resolves to a negative cache hit then ENOENT
2487 * will be returned.
2489 * MPSAFE
2490 */
2491 int
2492 cache_resolve(struct nchandle *nch, struct ucred *cred)
2493 {
2494 struct namecache *par_tmp;
2495 struct namecache *par;
2496 struct namecache *ncp;
2497 struct nchandle nctmp;
2498 struct mount *mp;
2499 struct vnode *dvp;
2500 int error;
2502 ncp = nch->ncp;
2503 mp = nch->mount;
2504 restart:
2506 * If the ncp is already resolved we have nothing to do. However,
2507 * we do want to guarantee that a usable vnode is returned when
2508 * a vnode is present, so make sure it hasn't been reclaimed.
2510 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
2511 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
2512 _cache_setunresolved(ncp);
2513 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
2514 return (ncp->nc_error);
2515 }
2517 /*
2518 * Mount points need special handling because the parent does not
2519 * belong to the same filesystem as the ncp.
2521 if (ncp == mp->mnt_ncmountpt.ncp)
2522 return (cache_resolve_mp(mp));
2525 * We expect an unbroken chain of ncps to at least the mount point,
2526 * and even all the way to root (but this code doesn't have to go
2527 * past the mount point).
2529 if (ncp->nc_parent == NULL) {
2530 kprintf("EXDEV case 1 %p %*.*s\n", ncp,
2531 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
2532 ncp->nc_error = EXDEV;
2533 return(ncp->nc_error);
2534 }
2536 /*
2537 * The vp's of the parent directories in the chain are held via vhold()
2538 * due to the existence of the child, and should not disappear.
2539 * However, there are cases where they can disappear:
2541 * - due to filesystem I/O errors.
2542 * - due to NFS being stupid about tracking the namespace and
2543 * destroying the namespace for entire directories quite often.
2544 * - due to forced unmounts.
2545 * - due to an rmdir (parent will be marked DESTROYED)
2547 * When this occurs we have to track the chain backwards and resolve
2548 * it, looping until the resolver catches up to the current node. We
2549 * could recurse here but we might run ourselves out of kernel stack
2550 * so we do it in a more painful manner. This situation really should
2551 * not occur all that often, and when it does it should not have to
2552 * walk back very many nodes to resolve the ncp.
2554 while ((dvp = cache_dvpref(ncp)) == NULL) {
2556 * This case can occur if a process is CD'd into a
2557 * directory which is then rmdir'd. If the parent is marked
2558 * destroyed there is no point trying to resolve it.
2560 if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
2561 return(ENOENT);
2562 par = ncp->nc_parent;
2563 _cache_hold(par);
2564 _cache_lock(par);
2565 while ((par_tmp = par->nc_parent) != NULL &&
2566 par_tmp->nc_vp == NULL) {
2567 _cache_hold(par_tmp);
2568 _cache_lock(par_tmp);
2569 _cache_put(par);
2570 par = par_tmp;
2571 }
2572 if (par->nc_parent == NULL) {
2573 kprintf("EXDEV case 2 %*.*s\n",
2574 par->nc_nlen, par->nc_nlen, par->nc_name);
2575 _cache_put(par);
2576 return (EXDEV);
2577 }
2578 kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
2579 par->nc_nlen, par->nc_nlen, par->nc_name);
2581 * The parent is not set in stone, ref and lock it to prevent
2582 * it from disappearing. Also note that due to renames it
2583 * is possible for our ncp to move and for par to no longer
2584 * be one of its parents. We resolve it anyway, the loop
2585 * will handle any moves.
2587 _cache_get(par); /* additional hold/lock */
2588 _cache_put(par); /* from earlier hold/lock */
2589 if (par == nch->mount->mnt_ncmountpt.ncp) {
2590 cache_resolve_mp(nch->mount);
2591 } else if ((dvp = cache_dvpref(par)) == NULL) {
2592 kprintf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name);
2593 _cache_put(par);
2594 continue;
2595 } else {
2596 if (par->nc_flag & NCF_UNRESOLVED) {
2597 nctmp.mount = mp;
2598 nctmp.ncp = par;
2599 par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
2600 }
2601 vrele(dvp);
2602 }
2603 if ((error = par->nc_error) != 0) {
2604 if (par->nc_error != EAGAIN) {
2605 kprintf("EXDEV case 3 %*.*s error %d\n",
2606 par->nc_nlen, par->nc_nlen, par->nc_name,
2607 par->nc_error);
2608 _cache_put(par);
2609 return(error);
2610 }
2611 kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
2612 par, par->nc_nlen, par->nc_nlen, par->nc_name);
2613 }
2614 _cache_put(par);
2615 /* loop */
2616 }
2619 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
2620 * ncp's and reattach them. If this occurs the original ncp is marked
2621 * EAGAIN to force a relookup.
2623 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
2624 * ncp must already be resolved.
2626 if (dvp) {
2627 nctmp.mount = mp;
2628 nctmp.ncp = ncp;
2629 ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
2630 vrele(dvp);
2631 } else {
2632 ncp->nc_error = EPERM;
2633 }
2634 if (ncp->nc_error == EAGAIN) {
2635 kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
2636 ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
2637 goto restart;
2638 }
2639 return(ncp->nc_error);
2640 }
2642 /*
2643 * Resolve the ncp associated with a mount point. Such ncp's almost always
2644 * remain resolved and this routine is rarely called. NFS mount points
2645 * tend to force re-resolution more often due to NFS's
2646 * mack-truck-smash-the-namecache method of tracking namespace changes.
2648 * The semantics for this call is that the passed ncp must be locked on
2649 * entry and will be locked on return. However, if we actually have to
2650 * resolve the mount point we temporarily unlock the entry in order to
2651 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of
2652 * the unlock we have to recheck the flags after we relock.
2653 */
2654 static int
2655 cache_resolve_mp(struct mount *mp)
2656 {
2657 struct namecache *ncp = mp->mnt_ncmountpt.ncp;
2658 struct vnode *vp;
2659 int error;
2661 KKASSERT(mp != NULL);
2664 * If the ncp is already resolved we have nothing to do. However,
2665 * we do want to guarantee that a usable vnode is returned when
2666 * a vnode is present, so make sure it hasn't been reclaimed.
2668 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
2669 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
2670 _cache_setunresolved(ncp);
2671 }
2673 if (ncp->nc_flag & NCF_UNRESOLVED) {
2674 _cache_unlock(ncp);
2675 while (vfs_busy(mp, 0))
2676 ;
2677 error = VFS_ROOT(mp, &vp);
2678 _cache_lock(ncp);
2681 * recheck the ncp state after relocking.
2683 if (ncp->nc_flag & NCF_UNRESOLVED) {
2684 ncp->nc_error = error;
2685 if (error == 0) {
2686 _cache_setvp(mp, ncp, vp);
2687 vput(vp);
2688 } else {
2689 kprintf("[diagnostic] cache_resolve_mp: failed"
2690 " to resolve mount %p err=%d ncp=%p\n",
2691 mp, error, ncp);
2692 _cache_setvp(mp, ncp, NULL);
2693 }
2694 } else if (error == 0) {
2695 vput(vp);
2696 }
2697 vfs_unbusy(mp);
2698 }
2699 return(ncp->nc_error);
2700 }
2703 * Clean out negative cache entries when too many have accumulated.
2705 * MPSAFE
2706 */
2707 static void
2708 _cache_cleanneg(int count)
2709 {
2710 struct namecache *ncp;
2713 * Automode from the vnlru proc - clean out 10% of the negative cache
2714 * entries.
2716 if (count == 0)
2717 count = numneg / 10 + 1;
2720 * Attempt to clean out the specified number of negative cache
2721 * entries.
2723 while (count) {
2724 spin_lock_wr(&ncspin);
2725 ncp = TAILQ_FIRST(&ncneglist);
2726 if (ncp == NULL) {
2727 spin_unlock_wr(&ncspin);
2728 break;
2729 }
2730 TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
2731 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
2732 _cache_hold(ncp);
2733 spin_unlock_wr(&ncspin);
2734 if (_cache_lock_special(ncp) == 0) {
2735 ncp = cache_zap(ncp, 0);
2736 if (ncp)
2737 _cache_drop(ncp);
2738 } else {
2739 _cache_drop(ncp);
2740 }
2741 --count;
2742 }
2743 }
2746 * This is a kitchen sink function to clean out ncps which we
2747 * tried to zap from cache_drop() but failed because we were
2748 * unable to acquire the parent lock.
2750 * Such entries can also be removed via cache_inval_vp(), such
2751 * as when unmounting.
2753 * MPSAFE
2754 */
2755 static void
2756 _cache_cleandefered(void)
2757 {
2758 struct nchash_head *nchpp;
2759 struct namecache *ncp;
2760 struct namecache dummy;
2761 int i;
2763 numdefered = 0;
2764 bzero(&dummy, sizeof(dummy));
2765 dummy.nc_flag = NCF_DESTROYED;
2767 for (i = 0; i <= nchash; ++i) {
2768 nchpp = &nchashtbl[i];
2770 spin_lock_wr(&nchpp->spin);
2771 LIST_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
2772 ncp = &dummy;
2773 while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) {
2774 if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
2775 continue;
2776 LIST_REMOVE(&dummy, nc_hash);
2777 LIST_INSERT_AFTER(ncp, &dummy, nc_hash);
2778 _cache_hold(ncp);
2779 spin_unlock_wr(&nchpp->spin);
2780 if (_cache_lock_nonblock(ncp) == 0) {
2781 ncp->nc_flag &= ~NCF_DEFEREDZAP;
2782 _cache_unlock(ncp);
2783 }
2784 _cache_drop(ncp);
2785 spin_lock_wr(&nchpp->spin);
2786 ncp = &dummy;
2787 }
2788 LIST_REMOVE(&dummy, nc_hash);
2789 spin_unlock_wr(&nchpp->spin);
2790 }
2791 }
2793 /*
2794 * Name cache initialization, from vfsinit() when we are booting
2795 */
2796 void
2797 nchinit(void)
2798 {
2799 int i;
2800 globaldata_t gd;
2802 /* initialize per-cpu namecache effectiveness statistics. */
2803 for (i = 0; i < ncpus; ++i) {
2804 gd = globaldata_find(i);
2805 gd->gd_nchstats = &nchstats[i];
2806 }
2807 TAILQ_INIT(&ncneglist);
2808 spin_init(&ncspin);
2809 nchashtbl = hashinit_ext(desiredvnodes*2, sizeof(struct nchash_head),
2810 M_VFSCACHE, &nchash);
2811 for (i = 0; i <= (int)nchash; ++i) {
2812 LIST_INIT(&nchashtbl[i].list);
2813 spin_init(&nchashtbl[i].spin);
2814 }
2815 nclockwarn = 5 * hz;
2816 }
2819 * Called from start_init() to bootstrap the root filesystem. Returns
2820 * a referenced, unlocked namecache record.
2821 */
2822 void
2823 cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
2824 {
2825 nch->ncp = cache_alloc(0);
2826 nch->mount = mp;
2827 atomic_add_int(&mp->mnt_refs, 1);
2828 if (vp)
2829 _cache_setvp(nch->mount, nch->ncp, vp);
2830 }
2833 * vfs_cache_setroot()
2835 * Create an association between the root of our namecache and
2836 * the root vnode. This routine may be called several times during
2837 * booting.
2839 * If the caller intends to save the returned namecache pointer somewhere
2840 * it must cache_hold() it.
2841 */
2842 void
2843 vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
2844 {
2845 struct vnode *ovp;
2846 struct nchandle onch;
2848 ovp = rootvnode;
2849 onch = rootnch;
2850 rootvnode = nvp;
2851 if (nch)
2852 rootnch = *nch;
2853 else
2854 cache_zero(&rootnch);
2855 if (ovp)
2856 vrele(ovp);
2857 if (onch.ncp)
2858 cache_drop(&onch);
2859 }
2862 * XXX OLD API COMPAT FUNCTION. This really messes up the new namecache
2863 * topology and is being removed as quickly as possible. The new VOP_N*()
2864 * API calls are required to make specific adjustments using the supplied
2865 * ncp pointers rather than just bogusly purging random vnodes.
2867 * Invalidate all namecache entries to a particular vnode as well as
2868 * any direct children of that vnode in the namecache. This is a
2869 * 'catch all' purge used by filesystems that do not know any better.
2871 * Note that the linkage between the vnode and its namecache entries will
2872 * be removed, but the namecache entries themselves might stay put due to
2873 * active references from elsewhere in the system or due to the existence of
2874 * the children. The namecache topology is left intact even if we do not
2875 * know what the vnode association is. Such entries will be marked
2876 * NCF_UNRESOLVED.
2877 */
2878 void
2879 cache_purge(struct vnode *vp)
2880 {
2881 cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
2882 }
2885 * Flush all entries referencing a particular filesystem.
2887 * Since we need to check it anyway, we will flush all the invalid
2888 * entries at the same time.
2890 #if 0
2892 void
2893 cache_purgevfs(struct mount *mp)
2894 {
2895 struct nchash_head *nchpp;
2896 struct namecache *ncp, *nnp;
2899 * Scan hash tables for applicable entries.
2901 for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
2902 spin_lock_wr(&nchpp->spin); XXX
2903 ncp = LIST_FIRST(&nchpp->list);
2904 if (ncp)
2905 _cache_hold(ncp);
2906 while (ncp) {
2907 nnp = LIST_NEXT(ncp, nc_hash);
2908 if (nnp)
2909 _cache_hold(nnp);
2910 if (ncp->nc_mount == mp) {
2911 _cache_lock(ncp);
2912 ncp = cache_zap(ncp, 0);
2913 if (ncp)
2914 _cache_drop(ncp);
2915 } else {
2916 _cache_drop(ncp);
2918 ncp = nnp;
2919 }
2920 spin_unlock_wr(&nchpp->spin); XXX
2921 }
2922 }
2924 #endif
2926 static int disablecwd;
2927 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");
2929 static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
2930 static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
2931 static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
2932 static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
2933 static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
2934 static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
2936 /*
2937 * MPALMOSTSAFE
2938 */
2939 int
2940 sys___getcwd(struct __getcwd_args *uap)
2941 {
2942 int buflen;
2943 int error;
2944 char *buf;
2945 char *bp;
2947 if (disablecwd)
2948 return (ENODEV);
2950 buflen = uap->buflen;
2951 if (buflen == 0)
2952 return (EINVAL);
2953 if (buflen > MAXPATHLEN)
2954 buflen = MAXPATHLEN;
2956 buf = kmalloc(buflen, M_TEMP, M_WAITOK);
2957 get_mplock();
2958 bp = kern_getcwd(buf, buflen, &error);
2959 rel_mplock();
2960 if (error == 0)
2961 error = copyout(bp, uap->buf, strlen(bp) + 1);
2962 kfree(buf, M_TEMP);
2963 return (error);
2964 }
2966 char *
2967 kern_getcwd(char *buf, size_t buflen, int *error)
2968 {
2969 struct proc *p = curproc;
2970 char *bp;
2971 int i, slash_prefixed;
2972 struct filedesc *fdp;
2973 struct nchandle nch;
2974 struct namecache *ncp;
2976 numcwdcalls++;
2977 bp = buf;
2978 bp += buflen - 1;
2979 *bp = '\0';
2980 fdp = p->p_fd;
2981 slash_prefixed = 0;
2983 nch = fdp->fd_ncdir;
2984 ncp = nch.ncp;
2985 if (ncp)
2986 _cache_hold(ncp);
2988 while (ncp && (ncp != fdp->fd_nrdir.ncp ||
2989 nch.mount != fdp->fd_nrdir.mount)
2990 ) {
2992 * While traversing upwards if we encounter the root
2993 * of the current mount we have to skip to the mount point
2994 * in the underlying filesystem.
2996 if (ncp == nch.mount->mnt_ncmountpt.ncp) {
2997 nch = nch.mount->mnt_ncmounton;
2998 _cache_drop(ncp);
2999 ncp = nch.ncp;
3000 if (ncp)
3001 _cache_hold(ncp);
3002 continue;
3003 }
3005 /*
3006 * Prepend the path segment
3007 */
3008 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
3009 if (bp == buf) {
3010 numcwdfail4++;
3011 *error = ERANGE;
3012 bp = NULL;
3013 goto done;
3015 *--bp = ncp->nc_name[i];
3016 }
3017 if (bp == buf) {
3018 numcwdfail4++;
3019 *error = ERANGE;
3020 bp = NULL;
3021 goto done;
3022 }
3023 *--bp = '/';
3024 slash_prefixed = 1;
3026 /*
3027 * Go up a directory. This isn't a mount point so we don't
3028 * have to check again.
3030 while ((nch.ncp = ncp->nc_parent) != NULL) {
3031 _cache_lock(ncp);
3032 if (nch.ncp != ncp->nc_parent) {
3033 _cache_unlock(ncp);
3034 continue;
3035 }
3036 _cache_hold(nch.ncp);
3037 _cache_unlock(ncp);
3038 break;
3039 }
3040 _cache_drop(ncp);
3041 ncp = nch.ncp;
3042 }
3043 if (ncp == NULL) {
3044 numcwdfail2++;
3045 *error = ENOENT;
3046 bp = NULL;
3047 goto done;
3048 }
3049 if (!slash_prefixed) {
3050 if (bp == buf) {
3051 numcwdfail4++;
3052 *error = ERANGE;
3053 bp = NULL;
3054 goto done;
3055 }
3056 *--bp = '/';
3057 }
3058 numcwdfound++;
3059 *error = 0;
3060 done:
3061 if (ncp)
3062 _cache_drop(ncp);
3063 return (bp);
3064 }
3066 /*
3067 * Thus begins the fullpath magic.
3069 * The passed nchp is referenced but not locked.
3070 */
3071 #undef STATNODE
3072 #define STATNODE(name) \
3073 static u_int name; \
3074 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")
3076 static int disablefullpath;
3077 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
3078 &disablefullpath, 0, "");
3080 STATNODE(numfullpathcalls);
3081 STATNODE(numfullpathfail1);
3082 STATNODE(numfullpathfail2);
3083 STATNODE(numfullpathfail3);
3084 STATNODE(numfullpathfail4);
3085 STATNODE(numfullpathfound);
3087 int
3088 cache_fullpath(struct proc *p, struct nchandle *nchp,
3089 char **retbuf, char **freebuf)
3090 {
3091 struct nchandle fd_nrdir;
3092 struct nchandle nch;
3093 struct namecache *ncp;
3094 struct mount *mp;
3095 char *bp, *buf;
3096 int slash_prefixed;
3097 int error = 0;
3098 int i;
3100 atomic_add_int(&numfullpathcalls, -1);
3102 *retbuf = NULL;
3103 *freebuf = NULL;
3105 buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
3106 bp = buf + MAXPATHLEN - 1;
3107 *bp = '\0';
3108 if (p != NULL)
3109 fd_nrdir = p->p_fd->fd_nrdir;
3110 else
3111 fd_nrdir = rootnch;
3112 slash_prefixed = 0;
3113 nch = *nchp;
3114 ncp = nch.ncp;
3115 if (ncp)
3116 _cache_hold(ncp);
3117 mp = nch.mount;
3119 while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
3121 * While traversing upwards if we encounter the root
3122 * of the current mount we have to skip to the mount point.
3124 if (ncp == mp->mnt_ncmountpt.ncp) {
3125 nch = mp->mnt_ncmounton;
3126 _cache_drop(ncp);
3127 ncp = nch.ncp;
3128 if (ncp)
3129 _cache_hold(ncp);
3130 mp = nch.mount;
3131 continue;
3132 }
3134 /*
3135 * Prepend the path segment
3136 */
3137 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
3138 if (bp == buf) {
3139 numfullpathfail4++;
3140 kfree(buf, M_TEMP);
3141 error = ENOMEM;
3142 goto done;
3143 }
3144 *--bp = ncp->nc_name[i];
3145 }
3146 if (bp == buf) {
3147 numfullpathfail4++;
3148 kfree(buf, M_TEMP);
3149 error = ENOMEM;
3150 goto done;
3151 }
3152 *--bp = '/';
3153 slash_prefixed = 1;
3155 /*
3156 * Go up a directory. This isn't a mount point so we don't
3157 * have to check again.
3159 * We can only safely access nc_parent with ncp held locked.
3161 while ((nch.ncp = ncp->nc_parent) != NULL) {
3162 _cache_lock(ncp);
3163 if (nch.ncp != ncp->nc_parent) {
3164 _cache_unlock(ncp);
3165 continue;
3166 }
3167 _cache_hold(nch.ncp);
3168 _cache_unlock(ncp);
3169 break;
3170 }
3171 _cache_drop(ncp);
3172 ncp = nch.ncp;
3173 }
3174 if (ncp == NULL) {
3175 numfullpathfail2++;
3176 kfree(buf, M_TEMP);
3177 error = ENOENT;
3178 goto done;
3179 }
3181 if (!slash_prefixed) {
3182 if (bp == buf) {
3183 numfullpathfail4++;
3184 kfree(buf, M_TEMP);
3185 error = ENOMEM;
3186 goto done;
3187 }
3188 *--bp = '/';
3189 }
3190 numfullpathfound++;
3191 *retbuf = bp;
3192 *freebuf = buf;
3193 error = 0;
3194 done:
3195 if (ncp)
3196 _cache_drop(ncp);
3197 return(error);
3198 }
3200 int
3201 vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf)
3202 {
3203 struct namecache *ncp;
3204 struct nchandle nch;
3205 int error;
3207 atomic_add_int(&numfullpathcalls, 1);
3208 if (disablefullpath)
3209 return (ENODEV);
3211 if (p == NULL)
3212 return (EINVAL);
3214 /* vn is NULL, client wants us to use p->p_textvp */
3215 if (vn == NULL) {
3216 if ((vn = p->p_textvp) == NULL)
3217 return (EINVAL);
3218 }
3219 spin_lock_wr(&vn->v_spinlock);
3220 TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
3221 if (ncp->nc_nlen)
3222 break;
3223 }
3224 if (ncp == NULL) {
3225 spin_unlock_wr(&vn->v_spinlock);
3226 return (EINVAL);
3228 _cache_hold(ncp);
3229 spin_unlock_wr(&vn->v_spinlock);
3231 atomic_add_int(&numfullpathcalls, -1);
3232 nch.ncp = ncp;
3233 nch.mount = vn->v_mount;
3234 error = cache_fullpath(p, &nch, retbuf, freebuf);
3235 _cache_drop(ncp);
3236 return (error);
3237 }