kernel - Force NFSv3 for diskless nfs mount
[dragonfly.git] / sys / kern / vfs_cache.c
blob 5fb7d226ce4b605b2c40bf6dca13c5f91fe1741c
1 /*
2 * Copyright (c) 2003,2004,2009 The DragonFly Project. All rights reserved.
3 *
4 * This code is derived from software contributed to The DragonFly Project
5 * by Matthew Dillon <dillon@backplane.com>
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
16 * distribution.
17 * 3. Neither the name of The DragonFly Project nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific, prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
25 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
34 * Copyright (c) 1989, 1993, 1995
35 * The Regents of the University of California. All rights reserved.
37 * This code is derived from software contributed to Berkeley by
38 * Poul-Henning Kamp of the FreeBSD Project.
40 * Redistribution and use in source and binary forms, with or without
41 * modification, are permitted provided that the following conditions
42 * are met:
43 * 1. Redistributions of source code must retain the above copyright
44 * notice, this list of conditions and the following disclaimer.
45 * 2. Redistributions in binary form must reproduce the above copyright
46 * notice, this list of conditions and the following disclaimer in the
47 * documentation and/or other materials provided with the distribution.
48 * 3. All advertising materials mentioning features or use of this software
49 * must display the following acknowledgement:
50 * This product includes software developed by the University of
51 * California, Berkeley and its contributors.
52 * 4. Neither the name of the University nor the names of its contributors
53 * may be used to endorse or promote products derived from this software
54 * without specific prior written permission.
56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
59 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
66 * SUCH DAMAGE.
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kernel.h>
72 #include <sys/sysctl.h>
73 #include <sys/mount.h>
74 #include <sys/vnode.h>
75 #include <sys/malloc.h>
76 #include <sys/sysproto.h>
77 #include <sys/spinlock.h>
78 #include <sys/proc.h>
79 #include <sys/namei.h>
80 #include <sys/nlookup.h>
81 #include <sys/filedesc.h>
82 #include <sys/fnv_hash.h>
83 #include <sys/globaldata.h>
84 #include <sys/kern_syscall.h>
85 #include <sys/dirent.h>
86 #include <ddb/ddb.h>
88 #include <sys/sysref2.h>
89 #include <sys/spinlock2.h>
90 #include <sys/mplock2.h>
92 #define MAX_RECURSION_DEPTH 64
95 * Random lookups in the cache are accomplished with a hash table using
96 * a hash key of (nc_src_vp, name). Each hash chain has its own spin lock.
98 * Negative entries may exist and correspond to resolved namecache
99 * structures where nc_vp is NULL. In a negative entry, NCF_WHITEOUT
100 * will be set if the entry corresponds to a whited-out directory entry
101 * (versus simply not finding the entry at all). ncneglist is locked
102 * with a global spinlock (ncspin).
104 * MPSAFE RULES:
106 * (1) A ncp must be referenced before it can be locked.
108 * (2) A ncp must be locked in order to modify it.
110 * (3) ncp locks are always ordered child -> parent. That may seem
111 * backwards but forward scans use the hash table and thus can hold
112 * the parent unlocked when traversing downward.
114 * This allows insert/rename/delete/dot-dot and other operations
115 * to use ncp->nc_parent links.
117 * This also prevents a locked up e.g. NFS node from creating a
118 * chain reaction all the way back to the root vnode / namecache.
120 * (4) parent linkages require both the parent and child to be locked.
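/*
 * A sketch of rule (3)'s child -> parent lock ordering, using the
 * internal helpers defined below (the same sequence
 * _cache_unlink_parent() uses):
 *
 *	_cache_hold(ncp);		 rule (1): ref before lock
 *	_cache_lock(ncp);		 child locked first
 *	par = ncp->nc_parent;		 stable while the child is locked
 *	_cache_hold(par);
 *	_cache_lock(par);		 then the parent, never the reverse
 *	...
 *	_cache_unlock(par);
 *	_cache_drop(par);
 */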
124 * Structures associated with name caching.
126 #define NCHHASH(hash) (&nchashtbl[(hash) & nchash])
127 #define MINNEG 1024
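/*
 * A hash chain is selected by hashing the component name and then the
 * parent pointer, e.g. as cache_rename() does below:
 *
 *	hash = fnv_32_buf(ncp->nc_name, ncp->nc_nlen, FNV1_32_INIT);
 *	hash = fnv_32_buf(&par, sizeof(par), hash);
 *	nchpp = NCHHASH(hash);
 */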
129 MALLOC_DEFINE(M_VFSCACHE, "vfscache", "VFS name cache entries");
131 LIST_HEAD(nchash_list, namecache);
133 struct nchash_head {
134 struct nchash_list list;
135 struct spinlock spin;
138 static struct nchash_head *nchashtbl;
139 static struct namecache_list ncneglist;
140 static struct spinlock ncspin;
143 * ncvp_debug - debug cache_fromvp(). This is used by the NFS server
144 * to create the namecache infrastructure leading to a dangling vnode.
146 * 0 Only errors are reported
147 * 1 Successes are reported
148 * 2 Successes + the whole directory scan is reported
149 * 3 Force the directory scan code to run as if the parent vnode did not
150 * have a namecache record, even if it does have one.
152 static int ncvp_debug;
153 SYSCTL_INT(_debug, OID_AUTO, ncvp_debug, CTLFLAG_RW, &ncvp_debug, 0, "");
155 static u_long nchash; /* size of hash table */
156 SYSCTL_ULONG(_debug, OID_AUTO, nchash, CTLFLAG_RD, &nchash, 0, "");
158 static int ncnegfactor = 16; /* ratio of negative entries */
159 SYSCTL_INT(_debug, OID_AUTO, ncnegfactor, CTLFLAG_RW, &ncnegfactor, 0, "");
161 static int nclockwarn; /* warn on locked entries in ticks */
162 SYSCTL_INT(_debug, OID_AUTO, nclockwarn, CTLFLAG_RW, &nclockwarn, 0, "");
164 static int numdefered; /* number of deferred-zap cache entries */
165 SYSCTL_INT(_debug, OID_AUTO, numdefered, CTLFLAG_RD, &numdefered, 0, "");
167 SYSCTL_INT(_debug, OID_AUTO, vnsize, CTLFLAG_RD, 0, sizeof(struct vnode), "");
168 SYSCTL_INT(_debug, OID_AUTO, ncsize, CTLFLAG_RD, 0, sizeof(struct namecache), "");
170 int cache_mpsafe = 1;
171 SYSCTL_INT(_vfs, OID_AUTO, cache_mpsafe, CTLFLAG_RW, &cache_mpsafe, 0, "");
173 static int cache_resolve_mp(struct mount *mp);
174 static struct vnode *cache_dvpref(struct namecache *ncp);
175 static void _cache_lock(struct namecache *ncp);
176 static void _cache_setunresolved(struct namecache *ncp);
177 static void _cache_cleanneg(int count);
178 static void _cache_cleandefered(void);
181 * The new name cache statistics
183 SYSCTL_NODE(_vfs, OID_AUTO, cache, CTLFLAG_RW, 0, "Name cache statistics");
184 #define STATNODE(mode, name, var) \
185 SYSCTL_ULONG(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
186 #define STATNODE_INT(mode, name, var) \
187 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, mode, var, 0, "");
188 static int numneg; STATNODE_INT(CTLFLAG_RD, numneg, &numneg);
189 static int numcache; STATNODE_INT(CTLFLAG_RD, numcache, &numcache);
190 static u_long numcalls; STATNODE(CTLFLAG_RD, numcalls, &numcalls);
191 static u_long dothits; STATNODE(CTLFLAG_RD, dothits, &dothits);
192 static u_long dotdothits; STATNODE(CTLFLAG_RD, dotdothits, &dotdothits);
193 static u_long numchecks; STATNODE(CTLFLAG_RD, numchecks, &numchecks);
194 static u_long nummiss; STATNODE(CTLFLAG_RD, nummiss, &nummiss);
195 static u_long nummisszap; STATNODE(CTLFLAG_RD, nummisszap, &nummisszap);
196 static u_long numposzaps; STATNODE(CTLFLAG_RD, numposzaps, &numposzaps);
197 static u_long numposhits; STATNODE(CTLFLAG_RD, numposhits, &numposhits);
198 static u_long numnegzaps; STATNODE(CTLFLAG_RD, numnegzaps, &numnegzaps);
199 static u_long numneghits; STATNODE(CTLFLAG_RD, numneghits, &numneghits);
201 struct nchstats nchstats[SMP_MAXCPU];
203 * Export VFS cache effectiveness statistics to user-land.
205 * The statistics are left for aggregation to user-land so
206 * neat things can be achieved, like observing per-CPU cache
207 * distribution.
209 static int
210 sysctl_nchstats(SYSCTL_HANDLER_ARGS)
212 struct globaldata *gd;
213 int i, error;
215 error = 0;
216 for (i = 0; i < ncpus; ++i) {
217 gd = globaldata_find(i);
218 if ((error = SYSCTL_OUT(req, (void *)&(*gd->gd_nchstats),
219 sizeof(struct nchstats))))
220 break;
223 return (error);
225 SYSCTL_PROC(_vfs_cache, OID_AUTO, nchstats, CTLTYPE_OPAQUE|CTLFLAG_RD,
226 0, 0, sysctl_nchstats, "S,nchstats", "VFS cache effectiveness statistics");
228 static struct namecache *cache_zap(struct namecache *ncp, int nonblock);
231 * Namespace locking. The caller must already hold a reference to the
232 * namecache structure in order to lock/unlock it. This function prevents
233 * the namespace from being created or destroyed by accessors other than
234 * the lock holder.
236 * Note that holding a locked namecache structure prevents other threads
237 * from making namespace changes (e.g. deleting or creating), prevents
238 * vnode association state changes by other threads, and prevents the
239 * namecache entry from being resolved or unresolved by other threads.
241 * The lock owner has full authority to associate/disassociate vnodes
242 * and resolve/unresolve the locked ncp.
244 * The primary lock field is nc_exlocks. nc_locktd is set after the
245 * fact (when locking) or cleared prior to unlocking.
247 * WARNING! Holding a locked ncp will prevent a vnode from being destroyed
248 * or recycled, but it does NOT help you if the vnode had already
249 * initiated a recyclement. If this is important, use cache_get()
250 * rather than cache_lock() (and deal with the differences in the
251 * way the refs counter is handled). Or, alternatively, make an
252 * unconditional call to cache_validate() or cache_resolve()
253 * after cache_lock() returns.
255 * MPSAFE
257 static
258 void
259 _cache_lock(struct namecache *ncp)
261 thread_t td;
262 int didwarn;
263 int error;
264 u_int count;
266 KKASSERT(ncp->nc_refs != 0);
267 didwarn = 0;
268 td = curthread;
270 for (;;) {
271 count = ncp->nc_exlocks;
273 if (count == 0) {
274 if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
276 * The vp associated with a locked ncp must
277 * be held to prevent it from being recycled.
279 * WARNING! If VRECLAIMED is set the vnode
280 * could already be in the middle of a recycle.
281 * Callers must use cache_vref() or
282 * cache_vget() on the locked ncp to
283 * validate the vp or set the cache entry
284 * to unresolved.
286 * NOTE! vhold() is allowed if we hold a
287 * lock on the ncp (which we do).
289 ncp->nc_locktd = td;
290 if (ncp->nc_vp)
291 vhold(ncp->nc_vp); /* MPSAFE */
292 break;
294 /* cmpset failed */
295 continue;
297 if (ncp->nc_locktd == td) {
298 if (atomic_cmpset_int(&ncp->nc_exlocks, count,
299 count + 1)) {
300 break;
302 /* cmpset failed */
303 continue;
305 tsleep_interlock(ncp, 0);
306 if (atomic_cmpset_int(&ncp->nc_exlocks, count,
307 count | NC_EXLOCK_REQ) == 0) {
308 /* cmpset failed */
309 continue;
311 error = tsleep(ncp, PINTERLOCKED, "clock", nclockwarn);
312 if (error == EWOULDBLOCK) {
313 if (didwarn == 0) {
314 didwarn = ticks;
315 kprintf("[diagnostic] cache_lock: blocked "
316 "on %p",
317 ncp);
318 kprintf(" \"%*.*s\"\n",
319 ncp->nc_nlen, ncp->nc_nlen,
320 ncp->nc_name);
324 if (didwarn) {
325 kprintf("[diagnostic] cache_lock: unblocked %*.*s after "
326 "%d secs\n",
327 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name,
328 (int)(ticks - didwarn) / hz);
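/*
 * Example (sketch): the canonical ref-then-lock/unlock-then-drop
 * sequence for the helpers above:
 *
 *	_cache_hold(ncp);	 ref first, per the MPSAFE rules
 *	_cache_lock(ncp);	 may block; nclockwarn triggers the
 *				 [diagnostic] message above
 *	...
 *	_cache_unlock(ncp);
 *	_cache_drop(ncp);
 *
 * _cache_get()/_cache_put() below wrap this pair and additionally force
 * the entry to unresolved if its vnode was reclaimed, per the WARNING.
 */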
333 * NOTE: nc_refs may be zero if the ncp is interlocked by circumstance,
334 * such as the case where one of its children is locked.
336 * MPSAFE
338 static
340 _cache_lock_nonblock(struct namecache *ncp)
342 thread_t td;
343 u_int count;
345 td = curthread;
347 for (;;) {
348 count = ncp->nc_exlocks;
350 if (count == 0) {
351 if (atomic_cmpset_int(&ncp->nc_exlocks, 0, 1)) {
353 * The vp associated with a locked ncp must
354 * be held to prevent it from being recycled.
356 * WARNING! If VRECLAIMED is set the vnode
357 * could already be in the middle of a recycle.
358 * Callers must use cache_vref() or
359 * cache_vget() on the locked ncp to
360 * validate the vp or set the cache entry
361 * to unresolved.
363 * NOTE! vhold() is allowed if we hold a
364 * lock on the ncp (which we do).
366 ncp->nc_locktd = td;
367 if (ncp->nc_vp)
368 vhold(ncp->nc_vp); /* MPSAFE */
369 break;
371 /* cmpset failed */
372 continue;
374 if (ncp->nc_locktd == td) {
375 if (atomic_cmpset_int(&ncp->nc_exlocks, count,
376 count + 1)) {
377 break;
379 /* cmpset failed */
380 continue;
382 return(EWOULDBLOCK);
384 return(0);
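/*
 * Example (sketch): the try-lock pattern used by _cache_drop() and by
 * the nonblocking cache_zap() path:
 *
 *	if (_cache_lock_nonblock(ncp) == 0) {
 *		...			 lock acquired
 *		_cache_unlock(ncp);
 *	} else {
 *		...			 EWOULDBLOCK, defer the work
 *	}
 */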
388 * Helper function
390 * NOTE: nc_refs can be 0 (degenerate case during _cache_drop).
392 * nc_locktd must be NULLed out prior to nc_exlocks getting cleared.
394 * MPSAFE
396 static
397 void
398 _cache_unlock(struct namecache *ncp)
400 thread_t td __debugvar = curthread;
401 u_int count;
403 KKASSERT(ncp->nc_refs >= 0);
404 KKASSERT(ncp->nc_exlocks > 0);
405 KKASSERT(ncp->nc_locktd == td);
407 count = ncp->nc_exlocks;
408 if ((count & ~NC_EXLOCK_REQ) == 1) {
409 ncp->nc_locktd = NULL;
410 if (ncp->nc_vp)
411 vdrop(ncp->nc_vp);
413 for (;;) {
414 if ((count & ~NC_EXLOCK_REQ) == 1) {
415 if (atomic_cmpset_int(&ncp->nc_exlocks, count, 0)) {
416 if (count & NC_EXLOCK_REQ)
417 wakeup(ncp);
418 break;
420 } else {
421 if (atomic_cmpset_int(&ncp->nc_exlocks, count,
422 count - 1)) {
423 break;
426 count = ncp->nc_exlocks;
432 * cache_hold() and cache_drop() prevent the premature deletion of a
433 * namecache entry but do not prevent operations (such as zapping) on
434 * that namecache entry.
436 * This routine may only be called from outside this source module if
437 * nc_refs is already at least 1.
439 * This is a rare case where callers are allowed to hold a spinlock,
440 * so we can't ourselves.
442 * MPSAFE
444 static __inline
445 struct namecache *
446 _cache_hold(struct namecache *ncp)
448 atomic_add_int(&ncp->nc_refs, 1);
449 return(ncp);
453 * Drop a cache entry, taking care to deal with races.
455 * For potential 1->0 transitions we must hold the ncp lock to safely
456 * test its flags. An unresolved entry with no children must be zapped
457 * to avoid leaks.
459 * The call to cache_zap() itself will handle all remaining races and
460 * will decrement the ncp's refs regardless. If we are resolved or
461 * have children nc_refs can safely be dropped to 0 without having to
462 * zap the entry.
464 * NOTE: cache_zap() will re-check nc_refs and nc_list in a MPSAFE fashion.
466 * NOTE: cache_zap() may return a non-NULL referenced parent which must
467 * be dropped in a loop.
469 * MPSAFE
471 static __inline
472 void
473 _cache_drop(struct namecache *ncp)
475 int refs;
477 while (ncp) {
478 KKASSERT(ncp->nc_refs > 0);
479 refs = ncp->nc_refs;
481 if (refs == 1) {
482 if (_cache_lock_nonblock(ncp) == 0) {
483 ncp->nc_flag &= ~NCF_DEFEREDZAP;
484 if ((ncp->nc_flag & NCF_UNRESOLVED) &&
485 TAILQ_EMPTY(&ncp->nc_list)) {
486 ncp = cache_zap(ncp, 1);
487 continue;
489 if (atomic_cmpset_int(&ncp->nc_refs, 1, 0)) {
490 _cache_unlock(ncp);
491 break;
493 _cache_unlock(ncp);
495 } else {
496 if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1))
497 break;
499 cpu_pause();
504 * Link a new namecache entry to its parent and to the hash table. Be
505 * careful to avoid races if vhold() blocks in the future.
507 * Both ncp and par must be referenced and locked.
509 * NOTE: The hash table spinlock is likely held during this call, so we
510 * can't do anything fancy.
512 * MPSAFE
514 static void
515 _cache_link_parent(struct namecache *ncp, struct namecache *par,
516 struct nchash_head *nchpp)
518 KKASSERT(ncp->nc_parent == NULL);
519 ncp->nc_parent = par;
520 ncp->nc_head = nchpp;
523 * Set inheritance flags. Note that the parent flags may be
524 * stale due to getattr potentially not having been run yet
525 * (it gets run during nlookup()'s).
527 ncp->nc_flag &= ~(NCF_SF_PNOCACHE | NCF_UF_PCACHE);
528 if (par->nc_flag & (NCF_SF_NOCACHE | NCF_SF_PNOCACHE))
529 ncp->nc_flag |= NCF_SF_PNOCACHE;
530 if (par->nc_flag & (NCF_UF_CACHE | NCF_UF_PCACHE))
531 ncp->nc_flag |= NCF_UF_PCACHE;
533 LIST_INSERT_HEAD(&nchpp->list, ncp, nc_hash);
535 if (TAILQ_EMPTY(&par->nc_list)) {
536 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
538 * Any vp associated with an ncp which has children must
539 * be held to prevent it from being recycled.
541 if (par->nc_vp)
542 vhold(par->nc_vp);
543 } else {
544 TAILQ_INSERT_HEAD(&par->nc_list, ncp, nc_entry);
549 * Remove the parent and hash associations from a namecache structure.
550 * If this is the last child of the parent the cache_drop(par) will
551 * attempt to recursively zap the parent.
553 * ncp must be locked. This routine will acquire a temporary lock on
554 * the parent as well as the appropriate hash chain.
556 * MPSAFE
558 static void
559 _cache_unlink_parent(struct namecache *ncp)
561 struct namecache *par;
562 struct vnode *dropvp;
564 if ((par = ncp->nc_parent) != NULL) {
565 KKASSERT(ncp->nc_parent == par);
566 _cache_hold(par);
567 _cache_lock(par);
568 spin_lock(&ncp->nc_head->spin);
569 LIST_REMOVE(ncp, nc_hash);
570 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
571 dropvp = NULL;
572 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
573 dropvp = par->nc_vp;
574 spin_unlock(&ncp->nc_head->spin);
575 ncp->nc_parent = NULL;
576 ncp->nc_head = NULL;
577 _cache_unlock(par);
578 _cache_drop(par);
581 * We can only safely vdrop with no spinlocks held.
583 if (dropvp)
584 vdrop(dropvp);
589 * Allocate a new namecache structure. Most of the code does not require
590 * zero-termination of the string but it makes vop_compat_ncreate() easier.
592 * MPSAFE
594 static struct namecache *
595 cache_alloc(int nlen)
597 struct namecache *ncp;
599 ncp = kmalloc(sizeof(*ncp), M_VFSCACHE, M_WAITOK|M_ZERO);
600 if (nlen)
601 ncp->nc_name = kmalloc(nlen + 1, M_VFSCACHE, M_WAITOK);
602 ncp->nc_nlen = nlen;
603 ncp->nc_flag = NCF_UNRESOLVED;
604 ncp->nc_error = ENOTCONN; /* needs to be resolved */
605 ncp->nc_refs = 1;
607 TAILQ_INIT(&ncp->nc_list);
608 _cache_lock(ncp);
609 return(ncp);
613 * Can only be called for the case where the ncp has never been
614 * associated with anything (so no spinlocks are needed).
616 * MPSAFE
618 static void
619 _cache_free(struct namecache *ncp)
621 KKASSERT(ncp->nc_refs == 1 && ncp->nc_exlocks == 1);
622 if (ncp->nc_name)
623 kfree(ncp->nc_name, M_VFSCACHE);
624 kfree(ncp, M_VFSCACHE);
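/*
 * Example (sketch): cache_alloc() hands back the new entry referenced
 * (nc_refs == 1), exclusively locked, and NCF_UNRESOLVED, so the
 * minimal lifecycle is:
 *
 *	ncp = cache_alloc(nlen);	 refd + locked + unresolved
 *	...				 link it in or resolve it
 *	_cache_put(ncp);		 unlock + drop
 */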
628 * MPSAFE
630 void
631 cache_zero(struct nchandle *nch)
633 nch->ncp = NULL;
634 nch->mount = NULL;
638 * Ref and deref a namecache structure.
640 * The caller must specify a stable ncp pointer, typically meaning the
641 * ncp is already referenced but this can also occur indirectly through
642 * e.g. holding a lock on a direct child.
644 * WARNING: Caller may hold an unrelated read spinlock, which means we can't
645 * use read spinlocks here.
647 * MPSAFE if nch is
649 struct nchandle *
650 cache_hold(struct nchandle *nch)
652 _cache_hold(nch->ncp);
653 atomic_add_int(&nch->mount->mnt_refs, 1);
654 return(nch);
658 * Create a copy of a namecache handle for an already-referenced
659 * entry.
661 * MPSAFE if nch is
663 void
664 cache_copy(struct nchandle *nch, struct nchandle *target)
666 *target = *nch;
667 if (target->ncp)
668 _cache_hold(target->ncp);
669 atomic_add_int(&nch->mount->mnt_refs, 1);
673 * MPSAFE if nch is
675 void
676 cache_changemount(struct nchandle *nch, struct mount *mp)
678 atomic_add_int(&nch->mount->mnt_refs, -1);
679 nch->mount = mp;
680 atomic_add_int(&nch->mount->mnt_refs, 1);
684 * MPSAFE
686 void
687 cache_drop(struct nchandle *nch)
689 atomic_add_int(&nch->mount->mnt_refs, -1);
690 _cache_drop(nch->ncp);
691 nch->ncp = NULL;
692 nch->mount = NULL;
696 * MPSAFE
698 void
699 cache_lock(struct nchandle *nch)
701 _cache_lock(nch->ncp);
705 * Relock nch1 given an unlocked nch1 and a locked nch2. The caller
706 * is responsible for checking both for validity on return as they
707 * may have become invalid.
709 * We have to deal with potential deadlocks here, just ping pong
710 * the lock until we get it (we will always block somewhere when
711 * looping so this is not cpu-intensive).
713 * which = 0 nch1 not locked, nch2 is locked
714 * which = 1 nch1 is locked, nch2 is not locked
716 void
717 cache_relock(struct nchandle *nch1, struct ucred *cred1,
718 struct nchandle *nch2, struct ucred *cred2)
720 int which;
722 which = 0;
724 for (;;) {
725 if (which == 0) {
726 if (cache_lock_nonblock(nch1) == 0) {
727 cache_resolve(nch1, cred1);
728 break;
730 cache_unlock(nch2);
731 cache_lock(nch1);
732 cache_resolve(nch1, cred1);
733 which = 1;
734 } else {
735 if (cache_lock_nonblock(nch2) == 0) {
736 cache_resolve(nch2, cred2);
737 break;
739 cache_unlock(nch1);
740 cache_lock(nch2);
741 cache_resolve(nch2, cred2);
742 which = 0;
748 * MPSAFE
751 cache_lock_nonblock(struct nchandle *nch)
753 return(_cache_lock_nonblock(nch->ncp));
758 * MPSAFE
760 void
761 cache_unlock(struct nchandle *nch)
763 _cache_unlock(nch->ncp);
767 * ref-and-lock, unlock-and-deref functions.
769 * This function is primarily used by nlookup. Even though cache_lock
770 * holds the vnode, it is possible that the vnode may have already
771 * initiated a recyclement.
773 * We want cache_get() to return a definitively usable vnode or a
774 * definitively unresolved ncp.
776 * MPSAFE
778 static
779 struct namecache *
780 _cache_get(struct namecache *ncp)
782 _cache_hold(ncp);
783 _cache_lock(ncp);
784 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
785 _cache_setunresolved(ncp);
786 return(ncp);
790 * This is a special form of _cache_lock() which only succeeds if
791 * it can get a pristine, non-recursive lock. The caller must have
792 * already ref'd the ncp.
794 * On success the ncp will be locked, on failure it will not. The
795 * ref count does not change either way.
797 * We want _cache_lock_special() (on success) to return a definitively
798 * usable vnode or a definitively unresolved ncp.
800 * MPSAFE
802 static int
803 _cache_lock_special(struct namecache *ncp)
805 if (_cache_lock_nonblock(ncp) == 0) {
806 if ((ncp->nc_exlocks & ~NC_EXLOCK_REQ) == 1) {
807 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
808 _cache_setunresolved(ncp);
809 return(0);
811 _cache_unlock(ncp);
813 return(EWOULDBLOCK);
818 * NOTE: The same nchandle can be passed for both arguments.
820 * MPSAFE
822 void
823 cache_get(struct nchandle *nch, struct nchandle *target)
825 KKASSERT(nch->ncp->nc_refs > 0);
826 target->mount = nch->mount;
827 target->ncp = _cache_get(nch->ncp);
828 atomic_add_int(&target->mount->mnt_refs, 1);
832 * MPSAFE
834 static __inline
835 void
836 _cache_put(struct namecache *ncp)
838 _cache_unlock(ncp);
839 _cache_drop(ncp);
843 * MPSAFE
845 void
846 cache_put(struct nchandle *nch)
848 atomic_add_int(&nch->mount->mnt_refs, -1);
849 _cache_put(nch->ncp);
850 nch->ncp = NULL;
851 nch->mount = NULL;
855 * Resolve an unresolved ncp by associating a vnode with it. If the
856 * vnode is NULL, a negative cache entry is created.
858 * The ncp should be locked on entry and will remain locked on return.
860 * MPSAFE
862 static
863 void
864 _cache_setvp(struct mount *mp, struct namecache *ncp, struct vnode *vp)
866 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
868 if (vp != NULL) {
870 * Any vp associated with an ncp which has children must
871 * be held. Any vp associated with a locked ncp must be held.
873 if (!TAILQ_EMPTY(&ncp->nc_list))
874 vhold(vp);
875 spin_lock(&vp->v_spinlock);
876 ncp->nc_vp = vp;
877 TAILQ_INSERT_HEAD(&vp->v_namecache, ncp, nc_vnode);
878 spin_unlock(&vp->v_spinlock);
879 if (ncp->nc_exlocks)
880 vhold(vp);
883 * Set auxiliary flags
885 switch(vp->v_type) {
886 case VDIR:
887 ncp->nc_flag |= NCF_ISDIR;
888 break;
889 case VLNK:
890 ncp->nc_flag |= NCF_ISSYMLINK;
891 /* XXX cache the contents of the symlink */
892 break;
893 default:
894 break;
896 atomic_add_int(&numcache, 1);
897 ncp->nc_error = 0;
898 } else {
900 * When creating a negative cache hit we set the
901 * namecache_gen. A later resolve will clean out the
902 * negative cache hit if the mount point's namecache_gen
903 * has changed. Used by devfs, could also be used by
904 * other remote FSs.
906 ncp->nc_vp = NULL;
907 spin_lock(&ncspin);
908 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
909 ++numneg;
910 spin_unlock(&ncspin);
911 ncp->nc_error = ENOENT;
912 if (mp)
913 ncp->nc_namecache_gen = mp->mnt_namecache_gen;
915 ncp->nc_flag &= ~(NCF_UNRESOLVED | NCF_DEFEREDZAP);
919 * MPSAFE
921 void
922 cache_setvp(struct nchandle *nch, struct vnode *vp)
924 _cache_setvp(nch->mount, nch->ncp, vp);
928 * MPSAFE
930 void
931 cache_settimeout(struct nchandle *nch, int nticks)
933 struct namecache *ncp = nch->ncp;
935 if ((ncp->nc_timeout = ticks + nticks) == 0)
936 ncp->nc_timeout = 1;
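/*
 * Example (sketch): cache_settimeout(nch, 2 * hz) requests that the
 * entry be auto-unresolved roughly two seconds from now. A computed
 * deadline of 0 is bumped to 1 above because nc_timeout == 0 means
 * "no timeout".
 */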
940 * Disassociate the vnode or negative-cache association and mark a
941 * namecache entry as unresolved again. Note that the ncp is still
942 * left in the hash table and still linked to its parent.
944 * The ncp should be locked and refd on entry and will remain locked and refd
945 * on return.
947 * This routine is normally never called on a directory containing children.
948 * However, NFS often does just that in its rename() code as a cop-out to
949 * avoid complex namespace operations. This disconnects a directory vnode
950 * from its namecache and can cause the OLDAPI and NEWAPI to get out of
951 * sync.
953 * MPSAFE
955 static
956 void
957 _cache_setunresolved(struct namecache *ncp)
959 struct vnode *vp;
961 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
962 ncp->nc_flag |= NCF_UNRESOLVED;
963 ncp->nc_timeout = 0;
964 ncp->nc_error = ENOTCONN;
965 if ((vp = ncp->nc_vp) != NULL) {
966 atomic_add_int(&numcache, -1);
967 spin_lock(&vp->v_spinlock);
968 ncp->nc_vp = NULL;
969 TAILQ_REMOVE(&vp->v_namecache, ncp, nc_vnode);
970 spin_unlock(&vp->v_spinlock);
973 * Any vp associated with an ncp with children is
974 * held by that ncp. Any vp associated with a locked
975 * ncp is held by that ncp. These conditions must be
976 * undone when the vp is cleared out from the ncp.
978 if (!TAILQ_EMPTY(&ncp->nc_list))
979 vdrop(vp);
980 if (ncp->nc_exlocks)
981 vdrop(vp);
982 } else {
983 spin_lock(&ncspin);
984 TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
985 --numneg;
986 spin_unlock(&ncspin);
988 ncp->nc_flag &= ~(NCF_WHITEOUT|NCF_ISDIR|NCF_ISSYMLINK);
993 * The cache_nresolve() code calls this function to automatically
994 * set a resolved cache element to unresolved if it has timed out
995 * or if it is a negative cache hit and the mount point namecache_gen
996 * has changed.
998 * MPSAFE
1000 static __inline void
1001 _cache_auto_unresolve(struct mount *mp, struct namecache *ncp)
1004 * Already in an unresolved state, nothing to do.
1006 if (ncp->nc_flag & NCF_UNRESOLVED)
1007 return;
1010 * Try to zap entries that have timed out. We have
1011 * to be careful here because locked leaves may depend
1012 * on the vnode remaining intact in a parent, so only
1013 * do this under very specific conditions.
1015 if (ncp->nc_timeout && (int)(ncp->nc_timeout - ticks) < 0 &&
1016 TAILQ_EMPTY(&ncp->nc_list)) {
1017 _cache_setunresolved(ncp);
1018 return;
1022 * If a resolved negative cache hit is invalid due to
1023 * the mount's namecache generation being bumped, zap it.
1025 if (ncp->nc_vp == NULL &&
1026 ncp->nc_namecache_gen != mp->mnt_namecache_gen) {
1027 _cache_setunresolved(ncp);
1028 return;
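/*
 * NOTE: the (int)(ncp->nc_timeout - ticks) < 0 test above is the usual
 * wrap-safe deadline comparison: the subtract-then-signed-compare form
 * stays correct when the tick counter wraps, whereas a direct
 * nc_timeout < ticks comparison would not.
 */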
1033 * MPSAFE
1035 void
1036 cache_setunresolved(struct nchandle *nch)
1038 _cache_setunresolved(nch->ncp);
1042 * Determine if we can clear NCF_ISMOUNTPT by scanning the mountlist
1043 * looking for matches. This flag tells the lookup code when it must
1044 * check for a mount linkage and also prevents the directories in question
1045 * from being deleted or renamed.
1047 * MPSAFE
1049 static
1051 cache_clrmountpt_callback(struct mount *mp, void *data)
1053 struct nchandle *nch = data;
1055 if (mp->mnt_ncmounton.ncp == nch->ncp)
1056 return(1);
1057 if (mp->mnt_ncmountpt.ncp == nch->ncp)
1058 return(1);
1059 return(0);
1063 * MPSAFE
1065 void
1066 cache_clrmountpt(struct nchandle *nch)
1068 int count;
1070 count = mountlist_scan(cache_clrmountpt_callback, nch,
1071 MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
1072 if (count == 0)
1073 nch->ncp->nc_flag &= ~NCF_ISMOUNTPT;
1077 * Invalidate portions of the namecache topology given a starting entry.
1078 * The passed ncp is set to an unresolved state and:
1080 * The passed ncp must be referenced and locked. The routine may unlock
1081 * and relock ncp several times, and will recheck the children and loop
1082 * to catch races. When done the passed ncp will be returned with the
1083 * reference and lock intact.
1085 * CINV_DESTROY - Set a flag in the passed ncp entry indicating
1086 * that the physical underlying nodes have been
1087 * destroyed... as in deleted. For example, when
1088 * a directory is removed. This will cause record
1089 * lookups on the name to no longer be able to find
1090 * the record and tells the resolver to return failure
1091 * rather than trying to resolve through the parent.
1093 * The topology itself, including ncp->nc_name,
1094 * remains intact.
1096 * This only applies to the passed ncp, if CINV_CHILDREN
1097 * is specified the children are not flagged.
1099 * CINV_CHILDREN - Set all children (recursively) to an unresolved
1100 * state as well.
1102 * Note that this will also have the side effect of
1103 * cleaning out any unreferenced nodes in the topology
1104 * from the leaves up as the recursion backs out.
1106 * Note that the topology for any referenced nodes remains intact, but
1107 * the nodes will be marked as having been destroyed and will be set
1108 * to an unresolved state.
1110 * It is possible for cache_inval() to race a cache_resolve(), meaning that
1111 * the namecache entry may not actually be invalidated on return if it was
1112 * revalidated while recursing down into its children. This code guarantees
1113 * that the node(s) will go through an invalidation cycle, but does not
1114 * guarantee that they will remain in an invalidated state.
1116 * Returns non-zero if a revalidation was detected during the invalidation
1117 * recursion, zero otherwise. Note that since only the original ncp is
1118 * locked the revalidation ultimately can only indicate that the original ncp
1119 * *MIGHT* not have been re-resolved.
1121 * DEEP RECURSION HANDLING - If a recursive invalidation recurses deeply we
1122 * have to avoid blowing out the kernel stack. We do this by saving the
1123 * deep namecache node and aborting the recursion, then re-recursing at that
1124 * node using a depth-first algorithm in order to allow multiple deep
1125 * recursions to chain through each other, then we restart the invalidation
1126 * from scratch.
1128 * MPSAFE
1131 struct cinvtrack {
1132 struct namecache *resume_ncp;
1133 int depth;
1136 static int _cache_inval_internal(struct namecache *, int, struct cinvtrack *);
1138 static
1140 _cache_inval(struct namecache *ncp, int flags)
1142 struct cinvtrack track;
1143 struct namecache *ncp2;
1144 int r;
1146 track.depth = 0;
1147 track.resume_ncp = NULL;
1149 for (;;) {
1150 r = _cache_inval_internal(ncp, flags, &track);
1151 if (track.resume_ncp == NULL)
1152 break;
1153 kprintf("Warning: deep namecache recursion at %s\n",
1154 ncp->nc_name);
1155 _cache_unlock(ncp);
1156 while ((ncp2 = track.resume_ncp) != NULL) {
1157 track.resume_ncp = NULL;
1158 _cache_lock(ncp2);
1159 _cache_inval_internal(ncp2, flags & ~CINV_DESTROY,
1160 &track);
1161 _cache_put(ncp2);
1163 _cache_lock(ncp);
1165 return(r);
1169 cache_inval(struct nchandle *nch, int flags)
1171 return(_cache_inval(nch->ncp, flags));
1175 * Helper for _cache_inval(). The passed ncp is refd and locked and
1176 * remains that way on return, but may be unlocked/relocked multiple
1177 * times by the routine.
1179 static int
1180 _cache_inval_internal(struct namecache *ncp, int flags, struct cinvtrack *track)
1182 struct namecache *kid;
1183 struct namecache *nextkid;
1184 int rcnt = 0;
1186 KKASSERT(ncp->nc_exlocks);
1188 _cache_setunresolved(ncp);
1189 if (flags & CINV_DESTROY)
1190 ncp->nc_flag |= NCF_DESTROYED;
1191 if ((flags & CINV_CHILDREN) &&
1192 (kid = TAILQ_FIRST(&ncp->nc_list)) != NULL
1194 _cache_hold(kid);
1195 if (++track->depth > MAX_RECURSION_DEPTH) {
1196 track->resume_ncp = ncp;
1197 _cache_hold(ncp);
1198 ++rcnt;
1200 _cache_unlock(ncp);
1201 while (kid) {
1202 if (track->resume_ncp) {
1203 _cache_drop(kid);
1204 break;
1206 if ((nextkid = TAILQ_NEXT(kid, nc_entry)) != NULL)
1207 _cache_hold(nextkid);
1208 if ((kid->nc_flag & NCF_UNRESOLVED) == 0 ||
1209 TAILQ_FIRST(&kid->nc_list)
1211 _cache_lock(kid);
1212 rcnt += _cache_inval_internal(kid, flags & ~CINV_DESTROY, track);
1213 _cache_unlock(kid);
1215 _cache_drop(kid);
1216 kid = nextkid;
1218 --track->depth;
1219 _cache_lock(ncp);
1223 * Someone could have gotten in there while ncp was unlocked,
1224 * retry if so.
1226 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
1227 ++rcnt;
1228 return (rcnt);
1232 * Invalidate a vnode's namecache associations. To avoid races against
1233 * the resolver we do not invalidate a node which we previously invalidated
1234 * but which was then re-resolved while we were in the invalidation loop.
1236 * Returns non-zero if any namecache entries remain after the invalidation
1237 * loop completed.
1239 * NOTE: Unlike the namecache topology which guarantees that ncp's will not
1240 * be ripped out of the topology while held, the vnode's v_namecache
1241 * list has no such restriction. NCP's can be ripped out of the list
1242 * at virtually any time if not locked, even if held.
1244 * In addition, the v_namecache list itself must be locked via
1245 * the vnode's spinlock.
1247 * MPSAFE
1250 cache_inval_vp(struct vnode *vp, int flags)
1252 struct namecache *ncp;
1253 struct namecache *next;
1255 restart:
1256 spin_lock(&vp->v_spinlock);
1257 ncp = TAILQ_FIRST(&vp->v_namecache);
1258 if (ncp)
1259 _cache_hold(ncp);
1260 while (ncp) {
1261 /* loop entered with ncp held and vp spin-locked */
1262 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
1263 _cache_hold(next);
1264 spin_unlock(&vp->v_spinlock);
1265 _cache_lock(ncp);
1266 if (ncp->nc_vp != vp) {
1267 kprintf("Warning: cache_inval_vp: race-A detected on "
1268 "%s\n", ncp->nc_name);
1269 _cache_put(ncp);
1270 if (next)
1271 _cache_drop(next);
1272 goto restart;
1274 _cache_inval(ncp, flags);
1275 _cache_put(ncp); /* also releases reference */
1276 ncp = next;
1277 spin_lock(&vp->v_spinlock);
1278 if (ncp && ncp->nc_vp != vp) {
1279 spin_unlock(&vp->v_spinlock);
1280 kprintf("Warning: cache_inval_vp: race-B detected on "
1281 "%s\n", ncp->nc_name);
1282 _cache_drop(ncp);
1283 goto restart;
1286 spin_unlock(&vp->v_spinlock);
1287 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
1291 * This routine is used instead of the normal cache_inval_vp() when we
1292 * are trying to recycle otherwise good vnodes.
1294 * Return 0 on success, non-zero if not all namecache records could be
1295 * disassociated from the vnode (for various reasons).
1297 * MPSAFE
1300 cache_inval_vp_nonblock(struct vnode *vp)
1302 struct namecache *ncp;
1303 struct namecache *next;
1305 spin_lock(&vp->v_spinlock);
1306 ncp = TAILQ_FIRST(&vp->v_namecache);
1307 if (ncp)
1308 _cache_hold(ncp);
1309 while (ncp) {
1310 /* loop entered with ncp held */
1311 if ((next = TAILQ_NEXT(ncp, nc_vnode)) != NULL)
1312 _cache_hold(next);
1313 spin_unlock(&vp->v_spinlock);
1314 if (_cache_lock_nonblock(ncp)) {
1315 _cache_drop(ncp);
1316 if (next)
1317 _cache_drop(next);
1318 goto done;
1320 if (ncp->nc_vp != vp) {
1321 kprintf("Warning: cache_inval_vp: race-A detected on "
1322 "%s\n", ncp->nc_name);
1323 _cache_put(ncp);
1324 if (next)
1325 _cache_drop(next);
1326 goto done;
1328 _cache_inval(ncp, 0);
1329 _cache_put(ncp); /* also releases reference */
1330 ncp = next;
1331 spin_lock(&vp->v_spinlock);
1332 if (ncp && ncp->nc_vp != vp) {
1333 spin_unlock(&vp->v_spinlock);
1334 kprintf("Warning: cache_inval_vp: race-B detected on "
1335 "%s\n", ncp->nc_name);
1336 _cache_drop(ncp);
1337 goto done;
1340 spin_unlock(&vp->v_spinlock);
1341 done:
1342 return(TAILQ_FIRST(&vp->v_namecache) != NULL);
1346 * The source ncp has been renamed to the target ncp. Both fncp and tncp
1347 * must be locked. The target ncp is destroyed (as a normal rename-over
1348 * would destroy the target file or directory).
1350 * Because there may be references to the source ncp we cannot copy its
1351 * contents to the target. Instead the source ncp is relinked as the target
1352 * and the target ncp is removed from the namecache topology.
1354 * MPSAFE
1356 void
1357 cache_rename(struct nchandle *fnch, struct nchandle *tnch)
1359 struct namecache *fncp = fnch->ncp;
1360 struct namecache *tncp = tnch->ncp;
1361 struct namecache *tncp_par;
1362 struct nchash_head *nchpp;
1363 u_int32_t hash;
1364 char *oname;
1367 * Rename fncp (unlink)
1369 _cache_unlink_parent(fncp);
1370 oname = fncp->nc_name;
1371 fncp->nc_name = tncp->nc_name;
1372 fncp->nc_nlen = tncp->nc_nlen;
1373 tncp_par = tncp->nc_parent;
1374 _cache_hold(tncp_par);
1375 _cache_lock(tncp_par);
1378 * Rename fncp (relink)
1380 hash = fnv_32_buf(fncp->nc_name, fncp->nc_nlen, FNV1_32_INIT);
1381 hash = fnv_32_buf(&tncp_par, sizeof(tncp_par), hash);
1382 nchpp = NCHHASH(hash);
1384 spin_lock(&nchpp->spin);
1385 _cache_link_parent(fncp, tncp_par, nchpp);
1386 spin_unlock(&nchpp->spin);
1388 _cache_put(tncp_par);
1391 * Get rid of the overwritten tncp (unlink)
1393 _cache_setunresolved(tncp);
1394 _cache_unlink_parent(tncp);
1395 tncp->nc_name = NULL;
1396 tncp->nc_nlen = 0;
1398 if (oname)
1399 kfree(oname, M_VFSCACHE);
1403 * vget the vnode associated with the namecache entry. Resolve the namecache
1404 * entry if necessary. The passed ncp must be referenced and locked.
1406 * lk_type may be LK_SHARED, LK_EXCLUSIVE. A ref'd, possibly locked
1407 * (depending on the passed lk_type) will be returned in *vpp with an error
1408 * of 0, or NULL will be returned in *vpp with a non-0 error code. The
1409 * most typical error is ENOENT, meaning that the ncp represents a negative
1410 * cache hit and there is no vnode to retrieve, but other errors can occur
1411 * too.
1413 * The vget() can race a reclaim. If this occurs we re-resolve the
1414 * namecache entry.
1416 * There are numerous places in the kernel where vget() is called on a
1417 * vnode while one or more of its namecache entries is locked. Releasing
1418 * a vnode never deadlocks against locked namecache entries (the vnode
1419 * will not get recycled while referenced ncp's exist). This means we
1420 * can safely acquire the vnode. In fact, we MUST NOT release the ncp
1421 * lock when acquiring the vp lock or we might cause a deadlock.
1423 * MPSAFE
1426 cache_vget(struct nchandle *nch, struct ucred *cred,
1427 int lk_type, struct vnode **vpp)
1429 struct namecache *ncp;
1430 struct vnode *vp;
1431 int error;
1433 ncp = nch->ncp;
1434 KKASSERT(ncp->nc_locktd == curthread);
1435 again:
1436 vp = NULL;
1437 if (ncp->nc_flag & NCF_UNRESOLVED)
1438 error = cache_resolve(nch, cred);
1439 else
1440 error = 0;
1442 if (error == 0 && (vp = ncp->nc_vp) != NULL) {
1443 error = vget(vp, lk_type);
1444 if (error) {
1446 * VRECLAIM race
1448 if (error == ENOENT) {
1449 kprintf("Warning: vnode reclaim race detected "
1450 "in cache_vget on %p (%s)\n",
1451 vp, ncp->nc_name);
1452 _cache_setunresolved(ncp);
1453 goto again;
1457 * Not a reclaim race, some other error.
1459 KKASSERT(ncp->nc_vp == vp);
1460 vp = NULL;
1461 } else {
1462 KKASSERT(ncp->nc_vp == vp);
1463 KKASSERT((vp->v_flag & VRECLAIMED) == 0);
1466 if (error == 0 && vp == NULL)
1467 error = ENOENT;
1468 *vpp = vp;
1469 return(error);
1473 cache_vref(struct nchandle *nch, struct ucred *cred, struct vnode **vpp)
1475 struct namecache *ncp;
1476 struct vnode *vp;
1477 int error;
1479 ncp = nch->ncp;
1480 KKASSERT(ncp->nc_locktd == curthread);
1481 again:
1482 vp = NULL;
1483 if (ncp->nc_flag & NCF_UNRESOLVED)
1484 error = cache_resolve(nch, cred);
1485 else
1486 error = 0;
1488 if (error == 0 && (vp = ncp->nc_vp) != NULL) {
1489 error = vget(vp, LK_SHARED);
1490 if (error) {
1492 * VRECLAIM race
1494 if (error == ENOENT) {
1495 kprintf("Warning: vnode reclaim race detected "
1496 "in cache_vget on %p (%s)\n",
1497 vp, ncp->nc_name);
1498 _cache_setunresolved(ncp);
1499 goto again;
1503 * Not a reclaim race, some other error.
1505 KKASSERT(ncp->nc_vp == vp);
1506 vp = NULL;
1507 } else {
1508 KKASSERT(ncp->nc_vp == vp);
1509 KKASSERT((vp->v_flag & VRECLAIMED) == 0);
1510 /* caller does not want a lock */
1511 vn_unlock(vp);
1514 if (error == 0 && vp == NULL)
1515 error = ENOENT;
1516 *vpp = vp;
1517 return(error);
1521 * Return a referenced vnode representing the parent directory of
1522 * ncp.
1524 * Because the caller has locked the ncp it should not be possible for
1525 * the parent ncp to go away. However, the parent can unresolve its
1526 * dvp at any time so we must be able to acquire a lock on the parent
1527 * to safely access nc_vp.
1529 * We have to leave par unlocked when vget()ing dvp to avoid a deadlock,
1530 * so use vhold()/vdrop() while holding the lock to prevent dvp from
1531 * getting destroyed.
1533 * MPSAFE - Note vhold() is allowed when dvp has 0 refs if we hold a
1534 * lock on the ncp in question..
1536 static struct vnode *
1537 cache_dvpref(struct namecache *ncp)
1539 struct namecache *par;
1540 struct vnode *dvp;
1542 dvp = NULL;
1543 if ((par = ncp->nc_parent) != NULL) {
1544 _cache_hold(par);
1545 _cache_lock(par);
1546 if ((par->nc_flag & NCF_UNRESOLVED) == 0) {
1547 if ((dvp = par->nc_vp) != NULL)
1548 vhold(dvp);
1550 _cache_unlock(par);
1551 if (dvp) {
1552 if (vget(dvp, LK_SHARED) == 0) {
1553 vn_unlock(dvp);
1554 vdrop(dvp);
1555 /* return refd, unlocked dvp */
1556 } else {
1557 vdrop(dvp);
1558 dvp = NULL;
1561 _cache_drop(par);
1563 return(dvp);
1567 * Convert a directory vnode to a namecache record without any other
1568 * knowledge of the topology. This ONLY works with directory vnodes and
1569 * is ONLY used by the NFS server. dvp must be refd but unlocked, and the
1570 * returned ncp (if not NULL) will be held and unlocked.
1572 * If 'makeit' is 0 and dvp has no existing namecache record, NULL is returned.
1573 * If 'makeit' is 1 we attempt to track-down and create the namecache topology
1574 * for dvp. This will fail only if the directory has been deleted out from
1575 * under the caller.
1577 * Callers must always check for a NULL return no matter the value of 'makeit'.
1579 * To avoid overflowing the kernel stack each recursive call increments
1580 * the makeit variable.
1583 static int cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
1584 struct vnode *dvp, char *fakename);
1585 static int cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
1586 struct vnode **saved_dvp);
1589 cache_fromdvp(struct vnode *dvp, struct ucred *cred, int makeit,
1590 struct nchandle *nch)
1592 struct vnode *saved_dvp;
1593 struct vnode *pvp;
1594 char *fakename;
1595 int error;
1597 nch->ncp = NULL;
1598 nch->mount = dvp->v_mount;
1599 saved_dvp = NULL;
1600 fakename = NULL;
1603 * Handle the makeit == 0 degenerate case
1605 if (makeit == 0) {
1606 spin_lock(&dvp->v_spinlock);
1607 nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
1608 if (nch->ncp)
1609 cache_hold(nch);
1610 spin_unlock(&dvp->v_spinlock);
1614 * Loop until resolution; the code inside will break out on error.
1616 while (makeit) {
1618 * Break out if we successfully acquire a working ncp.
1620 spin_lock(&dvp->v_spinlock);
1621 nch->ncp = TAILQ_FIRST(&dvp->v_namecache);
1622 if (nch->ncp) {
1623 cache_hold(nch);
1624 spin_unlock(&dvp->v_spinlock);
1625 break;
1627 spin_unlock(&dvp->v_spinlock);
1630 * If dvp is the root of its filesystem it should already
1631 * have a namecache pointer associated with it as a side
1632 * effect of the mount, but it may have been disassociated.
1634 if (dvp->v_flag & VROOT) {
1635 nch->ncp = _cache_get(nch->mount->mnt_ncmountpt.ncp);
1636 error = cache_resolve_mp(nch->mount);
1637 _cache_put(nch->ncp);
1638 if (ncvp_debug) {
1639 kprintf("cache_fromdvp: resolve root of mount %p error %d",
1640 dvp->v_mount, error);
1642 if (error) {
1643 if (ncvp_debug)
1644 kprintf(" failed\n");
1645 nch->ncp = NULL;
1646 break;
1648 if (ncvp_debug)
1649 kprintf(" succeeded\n");
1650 continue;
1654 * If we are recursed too deeply resort to an O(n^2)
1655 * algorithm to resolve the namecache topology. The
1656 * resolved pvp is left referenced in saved_dvp to
1657 * prevent the tree from being destroyed while we loop.
1659 if (makeit > 20) {
1660 error = cache_fromdvp_try(dvp, cred, &saved_dvp);
1661 if (error) {
1662 kprintf("lookupdotdot(longpath) failed %d "
1663 "dvp %p\n", error, dvp);
1664 nch->ncp = NULL;
1665 break;
1667 continue;
1671 * Get the parent directory and resolve its ncp.
1673 if (fakename) {
1674 kfree(fakename, M_TEMP);
1675 fakename = NULL;
1677 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
1678 &fakename);
1679 if (error) {
1680 kprintf("lookupdotdot failed %d dvp %p\n", error, dvp);
1681 break;
1683 vn_unlock(pvp);
1686 * Reuse makeit as a recursion depth counter. On success
1687 * nch will be fully referenced.
1689 cache_fromdvp(pvp, cred, makeit + 1, nch);
1690 vrele(pvp);
1691 if (nch->ncp == NULL)
1692 break;
1695 * Do an inefficient scan of pvp (embodied by ncp) to look
1696 * for dvp. This will create a namecache record for dvp on
1697 * success. We loop up to recheck on success.
1699 * ncp and dvp are both held but not locked.
1701 error = cache_inefficient_scan(nch, cred, dvp, fakename);
1702 if (error) {
1703 kprintf("cache_fromdvp: scan %p (%s) failed on dvp=%p\n",
1704 pvp, nch->ncp->nc_name, dvp);
1705 cache_drop(nch);
1706 /* nch was NULLed out, reload mount */
1707 nch->mount = dvp->v_mount;
1708 break;
1710 if (ncvp_debug) {
1711 kprintf("cache_fromdvp: scan %p (%s) succeeded\n",
1712 pvp, nch->ncp->nc_name);
1714 cache_drop(nch);
1715 /* nch was NULLed out, reload mount */
1716 nch->mount = dvp->v_mount;
1720 * If nch->ncp is non-NULL it will have been held already.
1722 if (fakename)
1723 kfree(fakename, M_TEMP);
1724 if (saved_dvp)
1725 vrele(saved_dvp);
1726 if (nch->ncp)
1727 return (0);
1728 return (EINVAL);
1732 * Go up the chain of parent directories until we find something
1733 * we can resolve into the namecache. This is very inefficient.
1735 static
1737 cache_fromdvp_try(struct vnode *dvp, struct ucred *cred,
1738 struct vnode **saved_dvp)
1740 struct nchandle nch;
1741 struct vnode *pvp;
1742 int error;
1743 static time_t last_fromdvp_report;
1744 char *fakename;
1747 * Loop getting the parent directory vnode until we get something we
1748 * can resolve in the namecache.
1750 vref(dvp);
1751 nch.mount = dvp->v_mount;
1752 nch.ncp = NULL;
1753 fakename = NULL;
1755 for (;;) {
1756 if (fakename) {
1757 kfree(fakename, M_TEMP);
1758 fakename = NULL;
1760 error = vop_nlookupdotdot(*dvp->v_ops, dvp, &pvp, cred,
1761 &fakename);
1762 if (error) {
1763 vrele(dvp);
1764 break;
1766 vn_unlock(pvp);
1767 spin_lock(&pvp->v_spinlock);
1768 if ((nch.ncp = TAILQ_FIRST(&pvp->v_namecache)) != NULL) {
1769 _cache_hold(nch.ncp);
1770 spin_unlock(&pvp->v_spinlock);
1771 vrele(pvp);
1772 break;
1774 spin_unlock(&pvp->v_spinlock);
1775 if (pvp->v_flag & VROOT) {
1776 nch.ncp = _cache_get(pvp->v_mount->mnt_ncmountpt.ncp);
1777 error = cache_resolve_mp(nch.mount);
1778 _cache_unlock(nch.ncp);
1779 vrele(pvp);
1780 if (error) {
1781 _cache_drop(nch.ncp);
1782 nch.ncp = NULL;
1783 vrele(dvp);
1785 break;
1787 vrele(dvp);
1788 dvp = pvp;
1790 if (error == 0) {
1791 if (last_fromdvp_report != time_second) {
1792 last_fromdvp_report = time_second;
1793 kprintf("Warning: extremely inefficient path "
1794 "resolution on %s\n",
1795 nch.ncp->nc_name);
1797 error = cache_inefficient_scan(&nch, cred, dvp, fakename);
1800 * Hopefully dvp now has a namecache record associated with
1801 * it. Leave it referenced to prevent the kernel from
1802 * recycling the vnode. Otherwise extremely long directory
1803 * paths could result in endless recycling.
1805 if (*saved_dvp)
1806 vrele(*saved_dvp);
1807 *saved_dvp = dvp;
1808 _cache_drop(nch.ncp);
1810 if (fakename)
1811 kfree(fakename, M_TEMP);
1812 return (error);
1816 * Do an inefficient scan of the directory represented by ncp looking for
1817 * the directory vnode dvp. ncp must be held but not locked on entry and
1818 * will be held on return. dvp must be refd but not locked on entry and
1819 * will remain refd on return.
1821 * Why do this at all? Well, due to its stateless nature the NFS server
1822 * converts file handles directly to vnodes without necessarily going through
1823 * the namecache ops that would otherwise create the namecache topology
1824 * leading to the vnode. We could either (1) Change the namecache algorithms
1825 * to allow disconnected namecache records that are re-merged opportunistically,
1826 * or (2) Make the NFS server backtrack and scan to recover a connected
1827 * namecache topology in order to then be able to issue new API lookups.
1829 * It turns out that (1) is a huge mess. It takes a nice clean set of
1830 * namecache algorithms and introduces a lot of complication in every subsystem
1831 * that calls into the namecache to deal with the re-merge case, especially
1832 * since we are using the namecache to placehold negative lookups and the
1833 * vnode might not be immediately assigned. (2) is certainly far less
1834 * efficient than (1), but since we are only talking about directories here
1835 * (which are likely to remain cached), the case does not actually run all
1836 * that often and has the supreme advantage of not polluting the namecache
1837 * algorithms.
1839 * If a fakename is supplied just construct a namecache entry using the
1840 * fake name.
1842 static int
1843 cache_inefficient_scan(struct nchandle *nch, struct ucred *cred,
1844 struct vnode *dvp, char *fakename)
1846 struct nlcomponent nlc;
1847 struct nchandle rncp;
1848 struct dirent *den;
1849 struct vnode *pvp;
1850 struct vattr vat;
1851 struct iovec iov;
1852 struct uio uio;
1853 int blksize;
1854 int eofflag;
1855 int bytes;
1856 char *rbuf;
1857 int error;
1859 vat.va_blocksize = 0;
1860 if ((error = VOP_GETATTR(dvp, &vat)) != 0)
1861 return (error);
1862 cache_lock(nch);
1863 error = cache_vref(nch, cred, &pvp);
1864 cache_unlock(nch);
1865 if (error)
1866 return (error);
1867 if (ncvp_debug) {
1868 kprintf("inefficient_scan: directory iosize %ld "
1869 "vattr fileid = %lld\n",
1870 vat.va_blocksize,
1871 (long long)vat.va_fileid);
1875 * Use the supplied fakename if not NULL. Fake names are typically
1876 * not in the actual filesystem hierarchy. This is used by HAMMER
1877 * to glue @@timestamp recursions together.
1879 if (fakename) {
1880 nlc.nlc_nameptr = fakename;
1881 nlc.nlc_namelen = strlen(fakename);
1882 rncp = cache_nlookup(nch, &nlc);
1883 goto done;
1886 if ((blksize = vat.va_blocksize) == 0)
1887 blksize = DEV_BSIZE;
1888 rbuf = kmalloc(blksize, M_TEMP, M_WAITOK);
1889 rncp.ncp = NULL;
1891 eofflag = 0;
1892 uio.uio_offset = 0;
1893 again:
1894 iov.iov_base = rbuf;
1895 iov.iov_len = blksize;
1896 uio.uio_iov = &iov;
1897 uio.uio_iovcnt = 1;
1898 uio.uio_resid = blksize;
1899 uio.uio_segflg = UIO_SYSSPACE;
1900 uio.uio_rw = UIO_READ;
1901 uio.uio_td = curthread;
1903 if (ncvp_debug >= 2)
1904 kprintf("cache_inefficient_scan: readdir @ %08x\n", (int)uio.uio_offset);
1905 error = VOP_READDIR(pvp, &uio, cred, &eofflag, NULL, NULL);
1906 if (error == 0) {
1907 den = (struct dirent *)rbuf;
1908 bytes = blksize - uio.uio_resid;
1910 while (bytes > 0) {
1911 if (ncvp_debug >= 2) {
1912 kprintf("cache_inefficient_scan: %*.*s\n",
1913 den->d_namlen, den->d_namlen,
1914 den->d_name);
1916 if (den->d_type != DT_WHT &&
1917 den->d_ino == vat.va_fileid) {
1918 if (ncvp_debug) {
1919 kprintf("cache_inefficient_scan: "
1920 "MATCHED inode %lld path %s/%*.*s\n",
1921 (long long)vat.va_fileid,
1922 nch->ncp->nc_name,
1923 den->d_namlen, den->d_namlen,
1924 den->d_name);
1926 nlc.nlc_nameptr = den->d_name;
1927 nlc.nlc_namelen = den->d_namlen;
1928 rncp = cache_nlookup(nch, &nlc);
1929 KKASSERT(rncp.ncp != NULL);
1930 break;
1932 bytes -= _DIRENT_DIRSIZ(den);
1933 den = _DIRENT_NEXT(den);
1935 if (rncp.ncp == NULL && eofflag == 0 && uio.uio_resid != blksize)
1936 goto again;
1938 kfree(rbuf, M_TEMP);
1939 done:
1940 vrele(pvp);
1941 if (rncp.ncp) {
1942 if (rncp.ncp->nc_flag & NCF_UNRESOLVED) {
1943 _cache_setvp(rncp.mount, rncp.ncp, dvp);
1944 if (ncvp_debug >= 2) {
1945 kprintf("cache_inefficient_scan: setvp %s/%s = %p\n",
1946 nch->ncp->nc_name, rncp.ncp->nc_name, dvp);
1948 } else {
1949 if (ncvp_debug >= 2) {
1950 kprintf("cache_inefficient_scan: setvp %s/%s already set %p/%p\n",
1951 nch->ncp->nc_name, rncp.ncp->nc_name, dvp,
1952 rncp.ncp->nc_vp);
1955 if (rncp.ncp->nc_vp == NULL)
1956 error = rncp.ncp->nc_error;
1958 * Release rncp after a successful nlookup. rncp was fully
1959 * referenced.
1961 cache_put(&rncp);
1962 } else {
1963 kprintf("cache_inefficient_scan: dvp %p NOT FOUND in %s\n",
1964 dvp, nch->ncp->nc_name);
1965 error = ENOENT;
1967 return (error);
1971 * Zap a namecache entry. The ncp is unconditionally set to an unresolved
1972 * state, which disassociates it from its vnode or ncneglist.
1974 * Then, if there are no additional references to the ncp and no children,
1975 * the ncp is removed from the topology and destroyed.
1977 * References and/or children may exist if the ncp is in the middle of the
1978 * topology, preventing the ncp from being destroyed.
1980 * This function must be called with the ncp held and locked and will unlock
1981 * and drop it during zapping.
1983 * If nonblock is non-zero and the parent ncp cannot be locked we give up.
1984 * This case can occur in the cache_drop() path.
1986 * This function may return a held (but NOT locked) parent node which the
1987 * caller must drop. We do this so _cache_drop() can loop, to avoid
1988 * blowing out the kernel stack.
1990 * WARNING! For MPSAFE operation this routine must acquire up to three
1991 * spin locks to be able to safely test nc_refs. Lock order is
1992 * very important.
1994 * hash spinlock if on hash list
1995 * parent spinlock if child of parent
1996 * (the ncp is unresolved so there is no vnode association)
1998 static struct namecache *
1999 cache_zap(struct namecache *ncp, int nonblock)
2001 struct namecache *par;
2002 struct vnode *dropvp;
2003 int refs;
2006 * Disassociate the vnode or negative cache ref and set NCF_UNRESOLVED.
2008 _cache_setunresolved(ncp);
2011 * Try to scrap the entry and possibly tail-recurse on its parent.
2012 * We only scrap unref'd (other than our ref) unresolved entries,
2013 * we do not scrap 'live' entries.
2015 * Note that once the spinlocks are acquired if nc_refs == 1 no
2016 * other references are possible. If it isn't, however, we have
2017 * to decrement but also be sure to avoid a 1->0 transition.
2019 KKASSERT(ncp->nc_flag & NCF_UNRESOLVED);
2020 KKASSERT(ncp->nc_refs > 0);
2023 * Acquire locks. Note that the parent can't go away while we hold
2024 * a child locked.
2026 if ((par = ncp->nc_parent) != NULL) {
2027 if (nonblock) {
2028 for (;;) {
2029 if (_cache_lock_nonblock(par) == 0)
2030 break;
2031 refs = ncp->nc_refs;
2032 ncp->nc_flag |= NCF_DEFEREDZAP;
2033 ++numdefered; /* MP race ok */
2034 if (atomic_cmpset_int(&ncp->nc_refs,
2035 refs, refs - 1)) {
2036 _cache_unlock(ncp);
2037 return(NULL);
2039 cpu_pause();
2041 _cache_hold(par);
2042 } else {
2043 _cache_hold(par);
2044 _cache_lock(par);
2046 spin_lock(&ncp->nc_head->spin);
2050 * If someone other than us has a ref or we have children
2051 * we cannot zap the entry. The 1->0 transition and any
2052 * further list operation is protected by the spinlocks
2053 * we have acquired but other transitions are not.
2055 for (;;) {
2056 refs = ncp->nc_refs;
2057 if (refs == 1 && TAILQ_EMPTY(&ncp->nc_list))
2058 break;
2059 if (atomic_cmpset_int(&ncp->nc_refs, refs, refs - 1)) {
2060 if (par) {
2061 spin_unlock(&ncp->nc_head->spin);
2062 _cache_put(par);
2064 _cache_unlock(ncp);
2065 return(NULL);
2067 cpu_pause();
2071 * We are the only ref and with the spinlocks held no further
2072 * refs can be acquired by others.
2074 * Remove us from the hash list and parent list. We have to
2075 * drop a ref on the parent's vp if the parent's list becomes
2076 * empty.
2078 dropvp = NULL;
2079 if (par) {
2080 struct nchash_head *nchpp = ncp->nc_head;
2082 KKASSERT(nchpp != NULL);
2083 LIST_REMOVE(ncp, nc_hash);
2084 TAILQ_REMOVE(&par->nc_list, ncp, nc_entry);
2085 if (par->nc_vp && TAILQ_EMPTY(&par->nc_list))
2086 dropvp = par->nc_vp;
2087 ncp->nc_head = NULL;
2088 ncp->nc_parent = NULL;
2089 spin_unlock(&nchpp->spin);
2090 _cache_unlock(par);
2091 } else {
2092 KKASSERT(ncp->nc_head == NULL);
2096 * ncp should not have picked up any refs. Physically
2097 * destroy the ncp.
2099 KKASSERT(ncp->nc_refs == 1);
2100 /* _cache_unlock(ncp) not required */
2101 ncp->nc_refs = -1; /* safety */
2102 if (ncp->nc_name)
2103 kfree(ncp->nc_name, M_VFSCACHE);
2104 kfree(ncp, M_VFSCACHE);
2107 * Delayed drop (we had to release our spinlocks)
2109 * The refed parent (if not NULL) must be dropped. The
2110 * caller is responsible for looping.
2112 if (dropvp)
2113 vdrop(dropvp);
2114 return(par);
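/*
 * Illustrative sketch (compiled out, not part of this file's real code):
 * the caller-side loop implied by cache_zap()'s contract.  Because the
 * zap hands back a held parent instead of recursing, a dropper can walk
 * up the topology in constant stack space.  The helper name
 * example_drop_chain is hypothetical.
 */
#if 0
static void
example_drop_chain(struct namecache *ncp)
{
	/*
	 * Each pass consumes our ref on ncp: the entry is either
	 * destroyed or its refcount is decremented.  A held (but not
	 * locked) parent, if returned, becomes the next candidate.
	 */
	while (ncp != NULL) {
		_cache_lock(ncp);		/* zap wants held + locked */
		ncp = cache_zap(ncp, 0);	/* blocking variant */
	}
}
#endif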
2118 * Clean up dangling negative cache and deferred-drop entries in the
2119 * namecache.
2121 static enum { CHI_LOW, CHI_HIGH } cache_hysteresis_state = CHI_LOW;
2123 void
2124 cache_hysteresis(void)
2127 * Don't cache too many negative hits. We use hysteresis to reduce
2128 * the impact on the critical path.
2130 switch(cache_hysteresis_state) {
2131 case CHI_LOW:
2132 if (numneg > MINNEG && numneg * ncnegfactor > numcache) {
2133 _cache_cleanneg(10);
2134 cache_hysteresis_state = CHI_HIGH;
2136 break;
2137 case CHI_HIGH:
2138 if (numneg > MINNEG * 9 / 10 &&
2139 numneg * ncnegfactor * 9 / 10 > numcache
2141 _cache_cleanneg(10);
2142 } else {
2143 cache_hysteresis_state = CHI_LOW;
2145 break;
2149 * Clean out dangling deferred-zap ncps which could not
2150 * be cleanly dropped if too many build up. Note
2151 * that numdefered is not an exact count, as such ncps
2152 * can be reused and the counter is not handled in an
2153 * MP-safe manner by design.
2155 if (numdefered * ncnegfactor > numcache) {
2156 _cache_cleandefered();
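/*
 * Worked example of the hysteresis above, assuming the defaults
 * MINNEG = 1024 and ncnegfactor = 16 (illustrative values; see the
 * tunables earlier in this file for the real ones):
 *
 *	numcache = 50000, numneg = 4000
 *	CHI_LOW:  4000 > 1024 && 4000 * 16 = 64000 > 50000
 *		  -> clean 10 entries, switch to CHI_HIGH.
 *	CHI_HIGH: keep cleaning while numneg > 921 and
 *		  numneg * 16 * 9 / 10 > 50000, i.e. numneg > ~3472,
 *		  then switch back to CHI_LOW.
 *
 * The cleaner thus overshoots roughly 10% below the trigger point
 * before disengaging, which keeps it from toggling on every lookup
 * when numneg hovers right at the threshold.
 */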
2161 * NEW NAMECACHE LOOKUP API
2163 * Lookup an entry in the namecache. The passed par_nch must be referenced
2164 * and unlocked. A referenced and locked nchandle with a non-NULL nch.ncp
2165 * is ALWAYS returned, even if the supplied component is illegal.
2167 * The resulting namecache entry should be returned to the system with
2168 * cache_put() or cache_unlock() + cache_drop().
2170 * namecache locks are recursive but care must be taken to avoid lock order
2171 * reversals (hence why the passed par_nch must be unlocked). Locking
2172 * rules are to order for parent traversals, not for child traversals.
2174 * Nobody else will be able to manipulate the associated namespace (e.g.
2175 * create, delete, rename, rename-target) until the caller unlocks the
2176 * entry.
2178 * The returned entry will be in one of three states: positive hit (non-null
2179 * vnode), negative hit (null vnode), or unresolved (NCF_UNRESOLVED is set).
2180 * Unresolved entries must be resolved through the filesystem to associate the
2181 * vnode and/or determine whether a positive or negative hit has occurred.
2183 * It is not necessary to lock a directory in order to lock namespace under
2184 * that directory. In fact, it is explicitly not allowed to do that. A
2185 * directory is typically only locked when being created, renamed, or
2186 * destroyed.
2188 * The directory (par) may be unresolved, in which case any returned child
2189 * will likely also be marked unresolved. Likely but not guaranteed. Since
2190 * the filesystem lookup requires a resolved directory vnode the caller is
2191 * responsible for resolving the namecache chain top-down. This API
2192 * specifically allows whole chains to be created in an unresolved state.
2194 struct nchandle
2195 cache_nlookup(struct nchandle *par_nch, struct nlcomponent *nlc)
2197 struct nchandle nch;
2198 struct namecache *ncp;
2199 struct namecache *new_ncp;
2200 struct nchash_head *nchpp;
2201 struct mount *mp;
2202 u_int32_t hash;
2203 globaldata_t gd;
2204 int par_locked;
2206 numcalls++;
2207 gd = mycpu;
2208 mp = par_nch->mount;
2209 par_locked = 0;
2212 * This is a good time to call it, no ncp's are locked by
2213 * the caller or us.
2215 cache_hysteresis();
2218 * Try to locate an existing entry
2220 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
2221 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
2222 new_ncp = NULL;
2223 nchpp = NCHHASH(hash);
2224 restart:
2225 spin_lock(&nchpp->spin);
2226 LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
2227 numchecks++;
2230 * Break out if we find a matching entry. Note that
2231 * UNRESOLVED entries may match, but DESTROYED entries
2232 * do not.
2234 if (ncp->nc_parent == par_nch->ncp &&
2235 ncp->nc_nlen == nlc->nlc_namelen &&
2236 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
2237 (ncp->nc_flag & NCF_DESTROYED) == 0
2239 _cache_hold(ncp);
2240 spin_unlock(&nchpp->spin);
2241 if (par_locked) {
2242 _cache_unlock(par_nch->ncp);
2243 par_locked = 0;
2245 if (_cache_lock_special(ncp) == 0) {
2246 _cache_auto_unresolve(mp, ncp);
2247 if (new_ncp)
2248 _cache_free(new_ncp);
2249 goto found;
2251 _cache_get(ncp);
2252 _cache_put(ncp);
2253 _cache_drop(ncp);
2254 goto restart;
2259 * We failed to locate an entry, so create a new entry and add it to
2260 * the cache. The parent ncp must also be locked so we
2261 * can link into it.
2263 * We have to relookup after possibly blocking in kmalloc or
2264 * when locking par_nch.
2266 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
2267 * mount case, in which case nc_name will be NULL.
2269 if (new_ncp == NULL) {
2270 spin_unlock(&nchpp->spin);
2271 new_ncp = cache_alloc(nlc->nlc_namelen);
2272 if (nlc->nlc_namelen) {
2273 bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
2274 nlc->nlc_namelen);
2275 new_ncp->nc_name[nlc->nlc_namelen] = 0;
2277 goto restart;
2279 if (par_locked == 0) {
2280 spin_unlock(&nchpp->spin);
2281 _cache_lock(par_nch->ncp);
2282 par_locked = 1;
2283 goto restart;
2287 * WARNING! We still hold the spinlock. We have to set the hash
2288 * table entry atomically.
2290 ncp = new_ncp;
2291 _cache_link_parent(ncp, par_nch->ncp, nchpp);
2292 spin_unlock(&nchpp->spin);
2293 _cache_unlock(par_nch->ncp);
2294 /* par_locked = 0 - not used */
2295 found:
2297 * stats and namecache size management
2299 if (ncp->nc_flag & NCF_UNRESOLVED)
2300 ++gd->gd_nchstats->ncs_miss;
2301 else if (ncp->nc_vp)
2302 ++gd->gd_nchstats->ncs_goodhits;
2303 else
2304 ++gd->gd_nchstats->ncs_neghits;
2305 nch.mount = mp;
2306 nch.ncp = ncp;
2307 atomic_add_int(&nch.mount->mnt_refs, 1);
2308 return(nch);
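/*
 * Illustrative sketch (compiled out): typical consumption of the lookup
 * API documented above.  The helper name example_lookup_one is
 * hypothetical and error handling is reduced to the bare contract.
 */
#if 0
static int
example_lookup_one(struct nchandle *par_nch, char *name, int len,
		   struct ucred *cred, struct nchandle *res)
{
	struct nlcomponent nlc;
	int error;

	bzero(&nlc, sizeof(nlc));
	nlc.nlc_nameptr = name;
	nlc.nlc_namelen = len;

	/*
	 * par_nch is referenced and unlocked; the result always comes
	 * back referenced and locked, possibly unresolved.
	 */
	*res = cache_nlookup(par_nch, &nlc);
	error = cache_resolve(res, cred);	/* no-op if already resolved */
	if (error) {
		cache_put(res);			/* cache_unlock + cache_drop */
		cache_zero(res);
	}
	return (error);
}
#endif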
2312 * This is a non-blocking version of cache_nlookup() used by
2313 * nfs_readdirplusrpc_uio(). It can fail for any reason and
2314 * will return nch.ncp == NULL in that case.
2316 struct nchandle
2317 cache_nlookup_nonblock(struct nchandle *par_nch, struct nlcomponent *nlc)
2319 struct nchandle nch;
2320 struct namecache *ncp;
2321 struct namecache *new_ncp;
2322 struct nchash_head *nchpp;
2323 struct mount *mp;
2324 u_int32_t hash;
2325 globaldata_t gd;
2326 int par_locked;
2328 numcalls++;
2329 gd = mycpu;
2330 mp = par_nch->mount;
2331 par_locked = 0;
2334 * Try to locate an existing entry
2336 hash = fnv_32_buf(nlc->nlc_nameptr, nlc->nlc_namelen, FNV1_32_INIT);
2337 hash = fnv_32_buf(&par_nch->ncp, sizeof(par_nch->ncp), hash);
2338 new_ncp = NULL;
2339 nchpp = NCHHASH(hash);
2340 restart:
2341 spin_lock(&nchpp->spin);
2342 LIST_FOREACH(ncp, &nchpp->list, nc_hash) {
2343 numchecks++;
2346 * Break out if we find a matching entry. Note that
2347 * UNRESOLVED entries may match, but DESTROYED entries
2348 * do not.
2350 if (ncp->nc_parent == par_nch->ncp &&
2351 ncp->nc_nlen == nlc->nlc_namelen &&
2352 bcmp(ncp->nc_name, nlc->nlc_nameptr, ncp->nc_nlen) == 0 &&
2353 (ncp->nc_flag & NCF_DESTROYED) == 0
2355 _cache_hold(ncp);
2356 spin_unlock(&nchpp->spin);
2357 if (par_locked) {
2358 _cache_unlock(par_nch->ncp);
2359 par_locked = 0;
2361 if (_cache_lock_special(ncp) == 0) {
2362 _cache_auto_unresolve(mp, ncp);
2363 if (new_ncp) {
2364 _cache_free(new_ncp);
2365 new_ncp = NULL;
2367 goto found;
2369 _cache_drop(ncp);
2370 goto failed;
2375 * We failed to locate an entry, so create a new entry and add it to
2376 * the cache. The parent ncp must also be locked so we
2377 * can link into it.
2379 * We have to relookup after possibly blocking in kmalloc or
2380 * when locking par_nch.
2382 * NOTE: nlc_namelen can be 0 and nlc_nameptr NULL as a special
2383 * mount case, in which case nc_name will be NULL.
2385 if (new_ncp == NULL) {
2386 spin_unlock(&nchpp->spin);
2387 new_ncp = cache_alloc(nlc->nlc_namelen);
2388 if (nlc->nlc_namelen) {
2389 bcopy(nlc->nlc_nameptr, new_ncp->nc_name,
2390 nlc->nlc_namelen);
2391 new_ncp->nc_name[nlc->nlc_namelen] = 0;
2393 goto restart;
2395 if (par_locked == 0) {
2396 spin_unlock(&nchpp->spin);
2397 if (_cache_lock_nonblock(par_nch->ncp) == 0) {
2398 par_locked = 1;
2399 goto restart;
2401 goto failed;
2405 * WARNING! We still hold the spinlock. We have to set the hash
2406 * table entry atomically.
2408 ncp = new_ncp;
2409 _cache_link_parent(ncp, par_nch->ncp, nchpp);
2410 spin_unlock(&nchpp->spin);
2411 _cache_unlock(par_nch->ncp);
2412 /* par_locked = 0 - not used */
2413 found:
2415 * stats and namecache size management
2417 if (ncp->nc_flag & NCF_UNRESOLVED)
2418 ++gd->gd_nchstats->ncs_miss;
2419 else if (ncp->nc_vp)
2420 ++gd->gd_nchstats->ncs_goodhits;
2421 else
2422 ++gd->gd_nchstats->ncs_neghits;
2423 nch.mount = mp;
2424 nch.ncp = ncp;
2425 atomic_add_int(&nch.mount->mnt_refs, 1);
2426 return(nch);
2427 failed:
2428 if (new_ncp) {
2429 _cache_free(new_ncp);
2430 new_ncp = NULL;
2432 nch.mount = NULL;
2433 nch.ncp = NULL;
2434 return(nch);
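/*
 * Illustrative sketch (compiled out): the non-blocking variant can fail
 * for any reason, so a caller such as the readdirplus path must be
 * prepared to skip the cache entirely.  Hypothetical helper.
 */
#if 0
static void
example_nonblock_lookup(struct nchandle *par_nch, struct nlcomponent *nlc)
{
	struct nchandle nch;

	nch = cache_nlookup_nonblock(par_nch, nlc);
	if (nch.ncp == NULL)
		return;			/* would have blocked or raced */
	/* same contract as cache_nlookup(): referenced and locked */
	cache_put(&nch);
}
#endif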
2438 * The namecache entry is marked as being used as a mount point.
2439 * Locate the mount if it is visible to the caller.
2441 struct findmount_info {
2442 struct mount *result;
2443 struct mount *nch_mount;
2444 struct namecache *nch_ncp;
2447 static
2449 cache_findmount_callback(struct mount *mp, void *data)
2451 struct findmount_info *info = data;
2454 * Check the mount's mounted-on point against the passed nch.
2456 if (mp->mnt_ncmounton.mount == info->nch_mount &&
2457 mp->mnt_ncmounton.ncp == info->nch_ncp
2459 info->result = mp;
2460 return(-1);
2462 return(0);
2465 struct mount *
2466 cache_findmount(struct nchandle *nch)
2468 struct findmount_info info;
2470 info.result = NULL;
2471 info.nch_mount = nch->mount;
2472 info.nch_ncp = nch->ncp;
2473 mountlist_scan(cache_findmount_callback, &info,
2474 MNTSCAN_FORWARD|MNTSCAN_NOBUSY);
2475 return(info.result);
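/*
 * Illustrative usage (compiled out): crossing a mount boundary during a
 * downward traversal.  Hypothetical helper; the real consumers live in
 * the nlookup path and must also handle the ref accounting.
 */
#if 0
static void
example_cross_mount(struct nchandle *nch)
{
	struct mount *mp;

	if ((mp = cache_findmount(nch)) != NULL) {
		/*
		 * nch is covered by a mount visible to us; the walk
		 * would continue at mp->mnt_ncmountpt.  The scan above
		 * stopped at the first callback returning -1.
		 */
	}
}
#endif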
2479 * Resolve an unresolved namecache entry, generally by looking it up.
2480 * The passed ncp must be locked and refd.
2482 * Theoretically since a vnode cannot be recycled while held, and since
2483 * the nc_parent chain holds its vnode as long as children exist, the
2484 * direct parent of the cache entry we are trying to resolve should
2485 * have a valid vnode. If not then generate an error that we can
2486 * determine is related to a resolver bug.
2488 * However, if a vnode was in the middle of a recyclement when the NCP
2489 * got locked, ncp->nc_vp might point to a vnode that is about to become
2490 * invalid. cache_resolve() handles this case by unresolving the entry
2491 * and then re-resolving it.
2493 * Note that successful resolution does not necessarily return an error
2494 * code of 0. If the ncp resolves to a negative cache hit then ENOENT
2495 * will be returned.
2497 * MPSAFE
2500 cache_resolve(struct nchandle *nch, struct ucred *cred)
2502 struct namecache *par_tmp;
2503 struct namecache *par;
2504 struct namecache *ncp;
2505 struct nchandle nctmp;
2506 struct mount *mp;
2507 struct vnode *dvp;
2508 int error;
2510 ncp = nch->ncp;
2511 mp = nch->mount;
2512 restart:
2514 * If the ncp is already resolved we have nothing to do. However,
2515 * we do want to guarantee that a usable vnode is returned when
2516 * a vnode is present, so make sure it hasn't been reclaimed.
2518 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
2519 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
2520 _cache_setunresolved(ncp);
2521 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0)
2522 return (ncp->nc_error);
2526 * Mount points need special handling because the parent does not
2527 * belong to the same filesystem as the ncp.
2529 if (ncp == mp->mnt_ncmountpt.ncp)
2530 return (cache_resolve_mp(mp));
2533 * We expect an unbroken chain of ncps to at least the mount point,
2534 * and even all the way to root (but this code doesn't have to go
2535 * past the mount point).
2537 if (ncp->nc_parent == NULL) {
2538 kprintf("EXDEV case 1 %p %*.*s\n", ncp,
2539 ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
2540 ncp->nc_error = EXDEV;
2541 return(ncp->nc_error);
2545 * The vp's of the parent directories in the chain are held via vhold()
2546 * due to the existence of the child, and should not disappear.
2547 * However, there are cases where they can disappear:
2549 * - due to filesystem I/O errors.
2550 * - due to NFS being stupid about tracking the namespace and
2551 * destroys the namespace for entire directories quite often.
2552 * - due to forced unmounts.
2553 * - due to an rmdir (parent will be marked DESTROYED)
2555 * When this occurs we have to track the chain backwards and resolve
2556 * it, looping until the resolver catches up to the current node. We
2557 * could recurse here but we might run ourselves out of kernel stack
2558 * so we do it in a more painful manner. This situation really should
2559 * not occur all that often, and when it does it should not have to go
2560 * back too many nodes to resolve the ncp.
2562 while ((dvp = cache_dvpref(ncp)) == NULL) {
2564 * This case can occur if a process is CD'd into a
2565 * directory which is then rmdir'd. If the parent is marked
2566 * destroyed there is no point trying to resolve it.
2568 if (ncp->nc_parent->nc_flag & NCF_DESTROYED)
2569 return(ENOENT);
2570 par = ncp->nc_parent;
2571 _cache_hold(par);
2572 _cache_lock(par);
2573 while ((par_tmp = par->nc_parent) != NULL &&
2574 par_tmp->nc_vp == NULL) {
2575 _cache_hold(par_tmp);
2576 _cache_lock(par_tmp);
2577 _cache_put(par);
2578 par = par_tmp;
2580 if (par->nc_parent == NULL) {
2581 kprintf("EXDEV case 2 %*.*s\n",
2582 par->nc_nlen, par->nc_nlen, par->nc_name);
2583 _cache_put(par);
2584 return (EXDEV);
2586 kprintf("[diagnostic] cache_resolve: had to recurse on %*.*s\n",
2587 par->nc_nlen, par->nc_nlen, par->nc_name);
2589 * The parent is not set in stone, ref and lock it to prevent
2590 * it from disappearing. Also note that due to renames it
2591 * is possible for our ncp to move and for par to no longer
2592 * be one of its parents. We resolve it anyway, the loop
2593 * will handle any moves.
2595 _cache_get(par); /* additional hold/lock */
2596 _cache_put(par); /* from earlier hold/lock */
2597 if (par == nch->mount->mnt_ncmountpt.ncp) {
2598 cache_resolve_mp(nch->mount);
2599 } else if ((dvp = cache_dvpref(par)) == NULL) {
2600 kprintf("[diagnostic] cache_resolve: raced on %*.*s\n", par->nc_nlen, par->nc_nlen, par->nc_name);
2601 _cache_put(par);
2602 continue;
2603 } else {
2604 if (par->nc_flag & NCF_UNRESOLVED) {
2605 nctmp.mount = mp;
2606 nctmp.ncp = par;
2607 par->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
2609 vrele(dvp);
2611 if ((error = par->nc_error) != 0) {
2612 if (par->nc_error != EAGAIN) {
2613 kprintf("EXDEV case 3 %*.*s error %d\n",
2614 par->nc_nlen, par->nc_nlen, par->nc_name,
2615 par->nc_error);
2616 _cache_put(par);
2617 return(error);
2619 kprintf("[diagnostic] cache_resolve: EAGAIN par %p %*.*s\n",
2620 par, par->nc_nlen, par->nc_nlen, par->nc_name);
2622 _cache_put(par);
2623 /* loop */
2627 * Call VOP_NRESOLVE() to get the vp, then scan for any disconnected
2628 * ncp's and reattach them. If this occurs the original ncp is marked
2629 * EAGAIN to force a relookup.
2631 * NOTE: in order to call VOP_NRESOLVE(), the parent of the passed
2632 * ncp must already be resolved.
2634 if (dvp) {
2635 nctmp.mount = mp;
2636 nctmp.ncp = ncp;
2637 ncp->nc_error = VOP_NRESOLVE(&nctmp, dvp, cred);
2638 vrele(dvp);
2639 } else {
2640 ncp->nc_error = EPERM;
2642 if (ncp->nc_error == EAGAIN) {
2643 kprintf("[diagnostic] cache_resolve: EAGAIN ncp %p %*.*s\n",
2644 ncp, ncp->nc_nlen, ncp->nc_nlen, ncp->nc_name);
2645 goto restart;
2647 return(ncp->nc_error);
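/*
 * Illustrative sketch (compiled out) of the resolution contract
 * described above: 0 means a positive hit with a usable vnode, while
 * ENOENT with the entry resolved means a negative hit.  Hypothetical
 * helper shown only to demonstrate the return-code convention.
 */
#if 0
static int
example_resolve(struct nchandle *nch, struct ucred *cred)
{
	int error;

	error = cache_resolve(nch, cred);
	if (error == 0) {
		/* positive hit: nch->ncp->nc_vp is valid */
	} else if (error == ENOENT &&
		   (nch->ncp->nc_flag & NCF_UNRESOLVED) == 0) {
		/* negative hit: the name is known not to exist */
	} else {
		/* hard failure: EXDEV, EPERM, or a filesystem error */
	}
	return (error);
}
#endif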
2651 * Resolve the ncp associated with a mount point. Such ncp's almost always
2652 * remain resolved and this routine is rarely called. NFS MPs tend to force
2653 * re-resolution more often due to NFS's Mack-truck-smash-the-namecache
2654 * method of tracking namespace changes.
2656 * The semantics for this call are that the passed ncp must be locked on
2657 * entry and will be locked on return. However, if we actually have to
2658 * resolve the mount point we temporarily unlock the entry in order to
2659 * avoid race-to-root deadlocks due to e.g. dead NFS mounts. Because of
2660 * the unlock we have to recheck the flags after we relock.
2662 static int
2663 cache_resolve_mp(struct mount *mp)
2665 struct namecache *ncp = mp->mnt_ncmountpt.ncp;
2666 struct vnode *vp;
2667 int error;
2669 KKASSERT(mp != NULL);
2672 * If the ncp is already resolved we have nothing to do. However,
2673 * we do want to guarantee that a usable vnode is returned when
2674 * a vnode is present, so make sure it hasn't been reclaimed.
2676 if ((ncp->nc_flag & NCF_UNRESOLVED) == 0) {
2677 if (ncp->nc_vp && (ncp->nc_vp->v_flag & VRECLAIMED))
2678 _cache_setunresolved(ncp);
2681 if (ncp->nc_flag & NCF_UNRESOLVED) {
2682 _cache_unlock(ncp);
2683 while (vfs_busy(mp, 0))
2685 error = VFS_ROOT(mp, &vp);
2686 _cache_lock(ncp);
2689 * recheck the ncp state after relocking.
2691 if (ncp->nc_flag & NCF_UNRESOLVED) {
2692 ncp->nc_error = error;
2693 if (error == 0) {
2694 _cache_setvp(mp, ncp, vp);
2695 vput(vp);
2696 } else {
2697 kprintf("[diagnostic] cache_resolve_mp: failed"
2698 " to resolve mount %p err=%d ncp=%p\n",
2699 mp, error, ncp);
2700 _cache_setvp(mp, ncp, NULL);
2702 } else if (error == 0) {
2703 vput(vp);
2705 vfs_unbusy(mp);
2707 return(ncp->nc_error);
2711 * Clean out negative cache entries when too many have accumulated.
2713 * MPSAFE
2715 static void
2716 _cache_cleanneg(int count)
2718 struct namecache *ncp;
2721 * Automode from the vnlru proc - clean out 10% of the negative cache
2722 * entries.
2724 if (count == 0)
2725 count = numneg / 10 + 1;
2728 * Attempt to clean out the specified number of negative cache
2729 * entries.
2731 while (count) {
2732 spin_lock(&ncspin);
2733 ncp = TAILQ_FIRST(&ncneglist);
2734 if (ncp == NULL) {
2735 spin_unlock(&ncspin);
2736 break;
2738 TAILQ_REMOVE(&ncneglist, ncp, nc_vnode);
2739 TAILQ_INSERT_TAIL(&ncneglist, ncp, nc_vnode);
2740 _cache_hold(ncp);
2741 spin_unlock(&ncspin);
2742 if (_cache_lock_special(ncp) == 0) {
2743 ncp = cache_zap(ncp, 1);
2744 if (ncp)
2745 _cache_drop(ncp);
2746 } else {
2747 _cache_drop(ncp);
2749 --count;
2754 * This is a kitchen sink function to clean out ncps which we
2755 * tried to zap from cache_drop() but failed because we were
2756 * unable to acquire the parent lock.
2758 * Such entries can also be removed via cache_inval_vp(), such
2759 * as when unmounting.
2761 * MPSAFE
2763 static void
2764 _cache_cleandefered(void)
2766 struct nchash_head *nchpp;
2767 struct namecache *ncp;
2768 struct namecache dummy;
2769 int i;
2771 numdefered = 0;
2772 bzero(&dummy, sizeof(dummy));
2773 dummy.nc_flag = NCF_DESTROYED;
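/*
 * The stack-allocated dummy acts as a scan placemarker: it is linked
 * into each hash chain so the iteration position survives dropping the
 * chain spinlock around the blocking work below.  Flagging it
 * NCF_DESTROYED ensures concurrent lookups ignore it.
 */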
2775 for (i = 0; i <= nchash; ++i) {
2776 nchpp = &nchashtbl[i];
2778 spin_lock(&nchpp->spin);
2779 LIST_INSERT_HEAD(&nchpp->list, &dummy, nc_hash);
2780 ncp = &dummy;
2781 while ((ncp = LIST_NEXT(ncp, nc_hash)) != NULL) {
2782 if ((ncp->nc_flag & NCF_DEFEREDZAP) == 0)
2783 continue;
2784 LIST_REMOVE(&dummy, nc_hash);
2785 LIST_INSERT_AFTER(ncp, &dummy, nc_hash);
2786 _cache_hold(ncp);
2787 spin_unlock(&nchpp->spin);
2788 if (_cache_lock_nonblock(ncp) == 0) {
2789 ncp->nc_flag &= ~NCF_DEFEREDZAP;
2790 _cache_unlock(ncp);
2792 _cache_drop(ncp);
2793 spin_lock(&nchpp->spin);
2794 ncp = &dummy;
2796 LIST_REMOVE(&dummy, nc_hash);
2797 spin_unlock(&nchpp->spin);
2802 * Name cache initialization, from vfsinit() when we are booting
2804 void
2805 nchinit(void)
2807 int i;
2808 globaldata_t gd;
2810 /* initialize per-cpu namecache effectiveness statistics. */
2811 for (i = 0; i < ncpus; ++i) {
2812 gd = globaldata_find(i);
2813 gd->gd_nchstats = &nchstats[i];
2815 TAILQ_INIT(&ncneglist);
2816 spin_init(&ncspin);
2817 nchashtbl = hashinit_ext(desiredvnodes*2, sizeof(struct nchash_head),
2818 M_VFSCACHE, &nchash);
2819 for (i = 0; i <= (int)nchash; ++i) {
2820 LIST_INIT(&nchashtbl[i].list);
2821 spin_init(&nchashtbl[i].spin);
2823 nclockwarn = 5 * hz;
2827 * Called from start_init() to bootstrap the root filesystem. Returns
2828 * a referenced, unlocked namecache record.
2830 void
2831 cache_allocroot(struct nchandle *nch, struct mount *mp, struct vnode *vp)
2833 nch->ncp = cache_alloc(0);
2834 nch->mount = mp;
2835 atomic_add_int(&mp->mnt_refs, 1);
2836 if (vp)
2837 _cache_setvp(nch->mount, nch->ncp, vp);
2841 * vfs_cache_setroot()
2843 * Create an association between the root of our namecache and
2844 * the root vnode. This routine may be called several times during
2845 * booting.
2847 * If the caller intends to save the returned namecache pointer somewhere
2848 * it must cache_hold() it.
2850 void
2851 vfs_cache_setroot(struct vnode *nvp, struct nchandle *nch)
2853 struct vnode *ovp;
2854 struct nchandle onch;
2856 ovp = rootvnode;
2857 onch = rootnch;
2858 rootvnode = nvp;
2859 if (nch)
2860 rootnch = *nch;
2861 else
2862 cache_zero(&rootnch);
2863 if (ovp)
2864 vrele(ovp);
2865 if (onch.ncp)
2866 cache_drop(&onch);
2870 * XXX OLD API COMPAT FUNCTION. This really messes up the new namecache
2871 * topology and is being removed as quickly as possible. The new VOP_N*()
2872 * API calls are required to make specific adjustments using the supplied
2873 * ncp pointers rather than just bogusly purging random vnodes.
2875 * Invalidate all namecache entries to a particular vnode as well as
2876 * any direct children of that vnode in the namecache. This is a
2877 * 'catch all' purge used by filesystems that do not know any better.
2879 * Note that the linkage between the vnode and its namecache entries will
2880 * be removed, but the namecache entries themselves might stay put due to
2881 * active references from elsewhere in the system or due to the existence of
2882 * the children. The namecache topology is left intact even if we do not
2883 * know what the vnode association is. Such entries will be marked
2884 * NCF_UNRESOLVED.
2886 void
2887 cache_purge(struct vnode *vp)
2889 cache_inval_vp(vp, CINV_DESTROY | CINV_CHILDREN);
2893 * Flush all entries referencing a particular filesystem.
2895 * Since we need to check it anyway, we will flush all the invalid
2896 * entries at the same time.
2898 #if 0
2900 void
2901 cache_purgevfs(struct mount *mp)
2903 struct nchash_head *nchpp;
2904 struct namecache *ncp, *nnp;
2907 * Scan hash tables for applicable entries.
2909 for (nchpp = &nchashtbl[nchash]; nchpp >= nchashtbl; nchpp--) {
2910 spin_lock_wr(&nchpp->spin); XXX
2911 ncp = LIST_FIRST(&nchpp->list);
2912 if (ncp)
2913 _cache_hold(ncp);
2914 while (ncp) {
2915 nnp = LIST_NEXT(ncp, nc_hash);
2916 if (nnp)
2917 _cache_hold(nnp);
2918 if (ncp->nc_mount == mp) {
2919 _cache_lock(ncp);
2920 ncp = cache_zap(ncp, 0);
2921 if (ncp)
2922 _cache_drop(ncp);
2923 } else {
2924 _cache_drop(ncp);
2926 ncp = nnp;
2928 spin_unlock_wr(&nchpp->spin); XXX
2932 #endif
2934 static int disablecwd;
2935 SYSCTL_INT(_debug, OID_AUTO, disablecwd, CTLFLAG_RW, &disablecwd, 0, "");
2937 static u_long numcwdcalls; STATNODE(CTLFLAG_RD, numcwdcalls, &numcwdcalls);
2938 static u_long numcwdfail1; STATNODE(CTLFLAG_RD, numcwdfail1, &numcwdfail1);
2939 static u_long numcwdfail2; STATNODE(CTLFLAG_RD, numcwdfail2, &numcwdfail2);
2940 static u_long numcwdfail3; STATNODE(CTLFLAG_RD, numcwdfail3, &numcwdfail3);
2941 static u_long numcwdfail4; STATNODE(CTLFLAG_RD, numcwdfail4, &numcwdfail4);
2942 static u_long numcwdfound; STATNODE(CTLFLAG_RD, numcwdfound, &numcwdfound);
2945 * MPALMOSTSAFE
2948 sys___getcwd(struct __getcwd_args *uap)
2950 u_int buflen;
2951 int error;
2952 char *buf;
2953 char *bp;
2955 if (disablecwd)
2956 return (ENODEV);
2958 buflen = uap->buflen;
2959 if (buflen == 0)
2960 return (EINVAL);
2961 if (buflen > MAXPATHLEN)
2962 buflen = MAXPATHLEN;
2964 buf = kmalloc(buflen, M_TEMP, M_WAITOK);
2965 get_mplock();
2966 bp = kern_getcwd(buf, buflen, &error);
2967 rel_mplock();
2968 if (error == 0)
2969 error = copyout(bp, uap->buf, strlen(bp) + 1);
2970 kfree(buf, M_TEMP);
2971 return (error);
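/*
 * Userland view (compiled out; this is user code, not kernel code):
 * libc's getcwd(3) typically reaches this syscall when the path cannot
 * be derived more cheaply.  Sizing the buffer at MAXPATHLEN avoids the
 * ERANGE case handled above.  Hypothetical snippet.
 */
#if 0
#include <sys/param.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

static void
example_print_cwd(void)
{
	char buf[MAXPATHLEN];

	if (getcwd(buf, sizeof(buf)) == NULL)
		err(1, "getcwd");	/* ERANGE if buf were too small */
	printf("%s\n", buf);
}
#endif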
2974 char *
2975 kern_getcwd(char *buf, size_t buflen, int *error)
2977 struct proc *p = curproc;
2978 char *bp;
2979 int i, slash_prefixed;
2980 struct filedesc *fdp;
2981 struct nchandle nch;
2982 struct namecache *ncp;
2984 numcwdcalls++;
2985 bp = buf;
2986 bp += buflen - 1;
2987 *bp = '\0';
2988 fdp = p->p_fd;
2989 slash_prefixed = 0;
2991 nch = fdp->fd_ncdir;
2992 ncp = nch.ncp;
2993 if (ncp)
2994 _cache_hold(ncp);
2996 while (ncp && (ncp != fdp->fd_nrdir.ncp ||
2997 nch.mount != fdp->fd_nrdir.mount)
3000 * While traversing upwards if we encounter the root
3001 * of the current mount we have to skip to the mount point
3002 * in the underlying filesystem.
3004 if (ncp == nch.mount->mnt_ncmountpt.ncp) {
3005 nch = nch.mount->mnt_ncmounton;
3006 _cache_drop(ncp);
3007 ncp = nch.ncp;
3008 if (ncp)
3009 _cache_hold(ncp);
3010 continue;
3014 * Prepend the path segment
3016 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
3017 if (bp == buf) {
3018 numcwdfail4++;
3019 *error = ERANGE;
3020 bp = NULL;
3021 goto done;
3023 *--bp = ncp->nc_name[i];
3025 if (bp == buf) {
3026 numcwdfail4++;
3027 *error = ERANGE;
3028 bp = NULL;
3029 goto done;
3031 *--bp = '/';
3032 slash_prefixed = 1;
3035 * Go up a directory. This isn't a mount point so we don't
3036 * have to check again.
3038 while ((nch.ncp = ncp->nc_parent) != NULL) {
3039 _cache_lock(ncp);
3040 if (nch.ncp != ncp->nc_parent) {
3041 _cache_unlock(ncp);
3042 continue;
3044 _cache_hold(nch.ncp);
3045 _cache_unlock(ncp);
3046 break;
3048 _cache_drop(ncp);
3049 ncp = nch.ncp;
3051 if (ncp == NULL) {
3052 numcwdfail2++;
3053 *error = ENOENT;
3054 bp = NULL;
3055 goto done;
3057 if (!slash_prefixed) {
3058 if (bp == buf) {
3059 numcwdfail4++;
3060 *error = ERANGE;
3061 bp = NULL;
3062 goto done;
3064 *--bp = '/';
3066 numcwdfound++;
3067 *error = 0;
3068 done:
3069 if (ncp)
3070 _cache_drop(ncp);
3071 return (bp);
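/*
 * Worked example (illustrative) of the right-to-left construction in
 * kern_getcwd() above: for a cwd of /usr/local and buflen = 16 the
 * buffer fills from the tail, one component at a time:
 *
 *	start:            ...............NUL	bp at offset 15
 *	prepend "local":  ........./localNUL	bp at offset 9
 *	prepend "usr":    ...../usr/localNUL	bp at offset 5
 *
 * (dots mark unused bytes; bp is returned pointing at the leading '/')
 * Building right-to-left avoids a second pass, since the total length
 * is unknown until the parent walk reaches the root.
 */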
3075 * Thus begins the fullpath magic.
3077 * The passed nchp is referenced but not locked.
3079 #undef STATNODE
3080 #define STATNODE(name) \
3081 static u_int name; \
3082 SYSCTL_UINT(_vfs_cache, OID_AUTO, name, CTLFLAG_RD, &name, 0, "")
3084 static int disablefullpath;
3085 SYSCTL_INT(_debug, OID_AUTO, disablefullpath, CTLFLAG_RW,
3086 &disablefullpath, 0, "");
3088 STATNODE(numfullpathcalls);
3089 STATNODE(numfullpathfail1);
3090 STATNODE(numfullpathfail2);
3091 STATNODE(numfullpathfail3);
3092 STATNODE(numfullpathfail4);
3093 STATNODE(numfullpathfound);
3096 cache_fullpath(struct proc *p, struct nchandle *nchp,
3097 char **retbuf, char **freebuf, int guess)
3099 struct nchandle fd_nrdir;
3100 struct nchandle nch;
3101 struct namecache *ncp;
3102 struct mount *mp, *new_mp;
3103 char *bp, *buf;
3104 int slash_prefixed;
3105 int error = 0;
3106 int i;
3108 atomic_add_int(&numfullpathcalls, -1);
3110 *retbuf = NULL;
3111 *freebuf = NULL;
3113 buf = kmalloc(MAXPATHLEN, M_TEMP, M_WAITOK);
3114 bp = buf + MAXPATHLEN - 1;
3115 *bp = '\0';
3116 if (p != NULL)
3117 fd_nrdir = p->p_fd->fd_nrdir;
3118 else
3119 fd_nrdir = rootnch;
3120 slash_prefixed = 0;
3121 nch = *nchp;
3122 ncp = nch.ncp;
3123 if (ncp)
3124 _cache_hold(ncp);
3125 mp = nch.mount;
3127 while (ncp && (ncp != fd_nrdir.ncp || mp != fd_nrdir.mount)) {
3128 new_mp = NULL;
3131 * If we are asked to guess the upwards path, we do so whenever
3132 * we encounter an ncp marked as a mountpoint. We locate the actual
3133 * mount by searching for the mount whose mounted-on point is this ncp.
3135 if (guess && (ncp->nc_flag & NCF_ISMOUNTPT)) {
3136 new_mp = mount_get_by_nc(ncp);
3139 * While traversing upwards if we encounter the root
3140 * of the current mount we have to skip to the mount point.
3142 if (ncp == mp->mnt_ncmountpt.ncp) {
3143 new_mp = mp;
3145 if (new_mp) {
3146 nch = new_mp->mnt_ncmounton;
3147 _cache_drop(ncp);
3148 ncp = nch.ncp;
3149 if (ncp)
3150 _cache_hold(ncp);
3151 mp = nch.mount;
3152 continue;
3156 * Prepend the path segment
3158 for (i = ncp->nc_nlen - 1; i >= 0; i--) {
3159 if (bp == buf) {
3160 numfullpathfail4++;
3161 kfree(buf, M_TEMP);
3162 error = ENOMEM;
3163 goto done;
3165 *--bp = ncp->nc_name[i];
3167 if (bp == buf) {
3168 numfullpathfail4++;
3169 kfree(buf, M_TEMP);
3170 error = ENOMEM;
3171 goto done;
3173 *--bp = '/';
3174 slash_prefixed = 1;
3177 * Go up a directory. This isn't a mount point so we don't
3178 * have to check again.
3180 * We can only safely access nc_parent with ncp held locked.
3182 while ((nch.ncp = ncp->nc_parent) != NULL) {
3183 _cache_lock(ncp);
3184 if (nch.ncp != ncp->nc_parent) {
3185 _cache_unlock(ncp);
3186 continue;
3188 _cache_hold(nch.ncp);
3189 _cache_unlock(ncp);
3190 break;
3192 _cache_drop(ncp);
3193 ncp = nch.ncp;
3195 if (ncp == NULL) {
3196 numfullpathfail2++;
3197 kfree(buf, M_TEMP);
3198 error = ENOENT;
3199 goto done;
3202 if (!slash_prefixed) {
3203 if (bp == buf) {
3204 numfullpathfail4++;
3205 kfree(buf, M_TEMP);
3206 error = ENOMEM;
3207 goto done;
3209 *--bp = '/';
3211 numfullpathfound++;
3212 *retbuf = bp;
3213 *freebuf = buf;
3214 error = 0;
3215 done:
3216 if (ncp)
3217 _cache_drop(ncp);
3218 return(error);
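/*
 * Illustrative calling convention (compiled out): *retbuf points into
 * the middle of the buffer returned via *freebuf, so callers print or
 * copy retbuf but free freebuf.  Hypothetical helper.
 */
#if 0
static void
example_print_path(struct proc *p, struct nchandle *nch)
{
	char *path, *freepath;

	if (cache_fullpath(p, nch, &path, &freepath, 0) == 0) {
		kprintf("full path: %s\n", path);
		kfree(freepath, M_TEMP);
	}
}
#endif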
3222 vn_fullpath(struct proc *p, struct vnode *vn, char **retbuf, char **freebuf, int guess)
3224 struct namecache *ncp;
3225 struct nchandle nch;
3226 int error;
3228 atomic_add_int(&numfullpathcalls, 1);
3229 if (disablefullpath)
3230 return (ENODEV);
3232 if (p == NULL)
3233 return (EINVAL);
3235 /* vn is NULL, client wants us to use p->p_textvp */
3236 if (vn == NULL) {
3237 if ((vn = p->p_textvp) == NULL)
3238 return (EINVAL);
3240 spin_lock(&vn->v_spinlock);
3241 TAILQ_FOREACH(ncp, &vn->v_namecache, nc_vnode) {
3242 if (ncp->nc_nlen)
3243 break;
3245 if (ncp == NULL) {
3246 spin_unlock(&vn->v_spinlock);
3247 return (EINVAL);
3249 _cache_hold(ncp);
3250 spin_unlock(&vn->v_spinlock);
3252 atomic_add_int(&numfullpathcalls, -1);
3253 nch.ncp = ncp;
3254 nch.mount = vn->v_mount;
3255 error = cache_fullpath(p, &nch, retbuf, freebuf, guess);
3256 _cache_drop(ncp);
3257 return (error);