/*
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 *
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */
#include <linux/config.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#define DCACHE_PARANOIA 1
/* #define DCACHE_DEBUG 1 */

spinlock_t dcache_lock __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;
seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

EXPORT_SYMBOL(dcache_lock);

static kmem_cache_t *dentry_cache;
/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS     d_hash_shift
#define D_HASHMASK     d_hash_mask

static unsigned int d_hash_mask;
static unsigned int d_hash_shift;
static struct hlist_head *dentry_hashtable;
static LIST_HEAD(dentry_unused);
/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
        .age_limit = 45,
};
static void d_callback(void *arg)
{
        struct dentry * dentry = (struct dentry *)arg;

        if (dname_external(dentry)) {
                kfree(dentry->d_qstr);
        }
        kmem_cache_free(dentry_cache, dentry);
}
/*
 * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry
 * inside dcache_lock.
 */
static void d_free(struct dentry *dentry)
{
        if (dentry->d_op && dentry->d_op->d_release)
                dentry->d_op->d_release(dentry);
        call_rcu(&dentry->d_rcu, d_callback, dentry);
}
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 * Called with dcache_lock and per dentry lock held, drops both.
 */
static inline void dentry_iput(struct dentry * dentry)
{
        struct inode *inode = dentry->d_inode;
        if (inode) {
                dentry->d_inode = NULL;
                list_del_init(&dentry->d_alias);
                spin_unlock(&dentry->d_lock);
                spin_unlock(&dcache_lock);
                if (dentry->d_op && dentry->d_op->d_iput)
                        dentry->d_op->d_iput(dentry, inode);
                else
                        iput(inode);
        } else {
                spin_unlock(&dentry->d_lock);
                spin_unlock(&dcache_lock);
        }
}
/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */
/**
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 *
 * no dcache lock, please.
 */
void dput(struct dentry *dentry)
{
        if (!dentry)
                return;

repeat:
        if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
                return;

        spin_lock(&dentry->d_lock);
        if (atomic_read(&dentry->d_count)) {
                spin_unlock(&dentry->d_lock);
                spin_unlock(&dcache_lock);
                return;
        }

        /*
         * AV: ->d_delete() is _NOT_ allowed to block now.
         */
        if (dentry->d_op && dentry->d_op->d_delete) {
                if (dentry->d_op->d_delete(dentry))
                        goto unhash_it;
        }
        /* Unreachable? Get rid of it */
        if (d_unhashed(dentry))
                goto kill_it;
        if (list_empty(&dentry->d_lru)) {
                dentry->d_vfs_flags |= DCACHE_REFERENCED;
                list_add(&dentry->d_lru, &dentry_unused);
                dentry_stat.nr_unused++;
        }
        spin_unlock(&dentry->d_lock);
        spin_unlock(&dcache_lock);
        return;

unhash_it:
        __d_drop(dentry);

kill_it: {
                struct dentry *parent;

                /* If dentry was on the d_lru list
                 * delete it from there */
                if (!list_empty(&dentry->d_lru)) {
                        list_del(&dentry->d_lru);
                        dentry_stat.nr_unused--;
                }
                list_del(&dentry->d_child);
                dentry_stat.nr_dentry--;        /* For d_free, below */
                /* drops the locks, at that point nobody can reach this dentry */
                dentry_iput(dentry);
                parent = dentry->d_parent;
                d_free(dentry);
                if (dentry == parent)
                        return;
                dentry = parent;
                goto repeat;
        }
}
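/*
 * Usage sketch (illustrative only, not part of this file's API): dput()
 * pairs with whatever took the reference, e.g. dget() or a successful
 * d_lookup().  "dir" and "q" below are hypothetical.
 *
 *      struct dentry *child = d_lookup(dir, &q);  // takes a reference
 *      if (child) {
 *              int positive = (child->d_inode != NULL);
 *              dput(child);    // drop it; may free the dentry and iput
 *                              // the inode, as above
 *      }
 */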
/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */
int d_invalidate(struct dentry * dentry)
{
        /*
         * If it's already been dropped, return OK.
         */
        spin_lock(&dcache_lock);
        if (d_unhashed(dentry)) {
                spin_unlock(&dcache_lock);
                return 0;
        }
        /*
         * Check whether to do a partial shrink_dcache
         * to get rid of unused child entries.
         */
        if (!list_empty(&dentry->d_subdirs)) {
                spin_unlock(&dcache_lock);
                shrink_dcache_parent(dentry);
                spin_lock(&dcache_lock);
        }

        /*
         * Somebody else still using it?
         *
         * If it's a directory, we can't drop it
         * for fear of somebody re-populating it
         * with children (even though dropping it
         * would make it unreachable from the root,
         * we might still populate it if it was a
         * working directory or similar).
         */
        spin_lock(&dentry->d_lock);
        if (atomic_read(&dentry->d_count) > 1) {
                if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
                        spin_unlock(&dentry->d_lock);
                        spin_unlock(&dcache_lock);
                        return -EBUSY;
                }
        }

        __d_drop(dentry);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&dcache_lock);
        return 0;
}
/* This should be called _only_ with dcache_lock held */

static inline struct dentry * __dget_locked(struct dentry *dentry)
{
        atomic_inc(&dentry->d_count);
        if (atomic_read(&dentry->d_count) == 1) {
                dentry_stat.nr_unused--;
                list_del_init(&dentry->d_lru);
        }
        return dentry;
}

struct dentry * dget_locked(struct dentry *dentry)
{
        return __dget_locked(dentry);
}
/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias - acquire the reference to alias and
 * return it. Otherwise return NULL. Notice that if inode is a directory
 * there can be only one alias and it can be unhashed only if it has
 * no children.
 *
 * If the inode has a DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.
 */
struct dentry * d_find_alias(struct inode *inode)
{
        struct list_head *head, *next, *tmp;
        struct dentry *alias, *discon_alias=NULL;

        spin_lock(&dcache_lock);
        head = &inode->i_dentry;
        next = inode->i_dentry.next;
        while (next != head) {
                tmp = next;
                next = tmp->next;
                prefetch(next);
                alias = list_entry(tmp, struct dentry, d_alias);
                if (!d_unhashed(alias)) {
                        if (alias->d_flags & DCACHE_DISCONNECTED)
                                discon_alias = alias;
                        else {
                                __dget_locked(alias);
                                spin_unlock(&dcache_lock);
                                return alias;
                        }
                }
        }
        if (discon_alias)
                __dget_locked(discon_alias);
        spin_unlock(&dcache_lock);
        return discon_alias;
}
/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
        struct list_head *tmp, *head = &inode->i_dentry;
restart:
        spin_lock(&dcache_lock);
        tmp = head;
        while ((tmp = tmp->next) != head) {
                struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
                if (!atomic_read(&dentry->d_count)) {
                        __dget_locked(dentry);
                        __d_drop(dentry);
                        spin_unlock(&dcache_lock);
                        dput(dentry);
                        goto restart;
                }
        }
        spin_unlock(&dcache_lock);
}
/*
 * Throw away a dentry - free the inode, dput the parent.
 * This requires that the LRU list has already been
 * removed.
 * Called with dcache_lock, drops it and then regains.
 */
static inline void prune_one_dentry(struct dentry * dentry)
{
        struct dentry * parent;

        __d_drop(dentry);
        list_del(&dentry->d_child);
        dentry_stat.nr_dentry--;        /* For d_free, below */
        dentry_iput(dentry);
        parent = dentry->d_parent;
        d_free(dentry);
        if (parent != dentry)
                dput(parent);
        spin_lock(&dcache_lock);
}
/**
 * prune_dcache - shrink the dcache
 * @count: number of entries to try and free
 *
 * Shrink the dcache. This is done when we need
 * more memory, or simply when we need to unmount
 * something (at which point we need to unuse
 * all dentries).
 *
 * This function may fail to free any resources if
 * all the dentries are in use.
 */
static void prune_dcache(int count)
{
        spin_lock(&dcache_lock);
        for (; count ; count--) {
                struct dentry *dentry;
                struct list_head *tmp;

                tmp = dentry_unused.prev;
                if (tmp == &dentry_unused)
                        break;
                list_del_init(tmp);
                prefetch(dentry_unused.prev);
                dentry_stat.nr_unused--;
                dentry = list_entry(tmp, struct dentry, d_lru);

                spin_lock(&dentry->d_lock);
                /* leave inuse dentries */
                if (atomic_read(&dentry->d_count)) {
                        spin_unlock(&dentry->d_lock);
                        continue;
                }
                /* If the dentry was recently referenced, don't free it. */
                if (dentry->d_vfs_flags & DCACHE_REFERENCED) {
                        dentry->d_vfs_flags &= ~DCACHE_REFERENCED;
                        list_add(&dentry->d_lru, &dentry_unused);
                        dentry_stat.nr_unused++;
                        spin_unlock(&dentry->d_lock);
                        continue;
                }
                prune_one_dentry(dentry);
        }
        spin_unlock(&dcache_lock);
}
/*
 * Shrink the dcache for the specified super block.
 * This allows us to unmount a device without disturbing
 * the dcache for the other devices.
 *
 * This implementation makes just two traversals of the
 * unused list. On the first pass we move the selected
 * dentries to the most recent end, and on the second
 * pass we free them. The second pass must restart after
 * each dput(), but since the target dentries are all at
 * the end, it's really just a single traversal.
 */

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This
 * is used to free the dcache before unmounting a file
 * system.
 */
void shrink_dcache_sb(struct super_block * sb)
{
        struct list_head *tmp, *next;
        struct dentry *dentry;

        /*
         * Pass one ... move the dentries for the specified
         * superblock to the most recent end of the unused list.
         */
        spin_lock(&dcache_lock);
        next = dentry_unused.next;
        while (next != &dentry_unused) {
                tmp = next;
                next = tmp->next;
                dentry = list_entry(tmp, struct dentry, d_lru);
                if (dentry->d_sb != sb)
                        continue;
                list_del(tmp);
                list_add(tmp, &dentry_unused);
        }

        /*
         * Pass two ... free the dentries for this superblock.
         */
repeat:
        next = dentry_unused.next;
        while (next != &dentry_unused) {
                tmp = next;
                next = tmp->next;
                dentry = list_entry(tmp, struct dentry, d_lru);
                if (dentry->d_sb != sb)
                        continue;
                dentry_stat.nr_unused--;
                list_del_init(tmp);
                spin_lock(&dentry->d_lock);
                if (atomic_read(&dentry->d_count)) {
                        spin_unlock(&dentry->d_lock);
                        continue;
                }
                prune_one_dentry(dentry);
                goto repeat;
        }
        spin_unlock(&dcache_lock);
}
/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point.
 */
int have_submounts(struct dentry *parent)
{
        struct dentry *this_parent = parent;
        struct list_head *next;

        spin_lock(&dcache_lock);
        if (d_mountpoint(parent))
                goto positive;
repeat:
        next = this_parent->d_subdirs.next;
resume:
        while (next != &this_parent->d_subdirs) {
                struct list_head *tmp = next;
                struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
                next = tmp->next;
                /* Have we found a mount point ? */
                if (d_mountpoint(dentry))
                        goto positive;
                if (!list_empty(&dentry->d_subdirs)) {
                        this_parent = dentry;
                        goto repeat;
                }
        }
        /*
         * All done at this level ... ascend and resume the search.
         */
        if (this_parent != parent) {
                next = this_parent->d_child.next;
                this_parent = this_parent->d_parent;
                goto resume;
        }
        spin_unlock(&dcache_lock);
        return 0; /* No mount points found in tree */
positive:
        spin_unlock(&dcache_lock);
        return 1;
}
/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 */
static int select_parent(struct dentry * parent)
{
        struct dentry *this_parent = parent;
        struct list_head *next;
        int found = 0;

        spin_lock(&dcache_lock);
repeat:
        next = this_parent->d_subdirs.next;
resume:
        while (next != &this_parent->d_subdirs) {
                struct list_head *tmp = next;
                struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
                next = tmp->next;

                if (!list_empty(&dentry->d_lru)) {
                        dentry_stat.nr_unused--;
                        list_del_init(&dentry->d_lru);
                }
                /*
                 * move only zero ref count dentries to the end
                 * of the unused list for prune_dcache
                 */
                if (!atomic_read(&dentry->d_count)) {
                        list_add(&dentry->d_lru, dentry_unused.prev);
                        dentry_stat.nr_unused++;
                        found++;
                }
                /*
                 * Descend a level if the d_subdirs list is non-empty.
                 */
                if (!list_empty(&dentry->d_subdirs)) {
                        this_parent = dentry;
#ifdef DCACHE_DEBUG
                        printk(KERN_DEBUG "select_parent: descending to %s/%s, found=%d\n",
                               dentry->d_parent->d_name.name, dentry->d_name.name, found);
#endif
                        goto repeat;
                }
        }
        /*
         * All done at this level ... ascend and resume the search.
         */
        if (this_parent != parent) {
                next = this_parent->d_child.next;
                this_parent = this_parent->d_parent;
#ifdef DCACHE_DEBUG
                printk(KERN_DEBUG "select_parent: ascending to %s/%s, found=%d\n",
                       this_parent->d_parent->d_name.name, this_parent->d_name.name, found);
#endif
                goto resume;
        }
        spin_unlock(&dcache_lock);
        return found;
}
/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry * parent)
{
        int found;

        while ((found = select_parent(parent)) != 0)
                prune_dcache(found);
}
/**
 * shrink_dcache_anon - further prune the cache
 * @head: head of d_hash list of dentries to prune
 *
 * Prune the dentries that are anonymous
 *
 * parsing the d_hash list does not need read_barrier_depends() as it is
 * done under dcache_lock.
 */
void shrink_dcache_anon(struct hlist_head *head)
{
        struct hlist_node *lp;
        int found;
        do {
                found = 0;
                spin_lock(&dcache_lock);
                hlist_for_each(lp, head) {
                        struct dentry *this = hlist_entry(lp, struct dentry, d_hash);
                        if (!list_empty(&this->d_lru)) {
                                dentry_stat.nr_unused--;
                                list_del(&this->d_lru);
                        }

                        /*
                         * move only zero ref count dentries to the end
                         * of the unused list for prune_dcache
                         */
                        if (!atomic_read(&this->d_count)) {
                                list_add_tail(&this->d_lru, &dentry_unused);
                                dentry_stat.nr_unused++;
                                found++;
                        }
                }
                spin_unlock(&dcache_lock);
                prune_dcache(found);
        } while (found);
}
/*
 * This is called from kswapd when we think we need some more memory.
 */
static int shrink_dcache_memory(int nr, unsigned int gfp_mask)
{
        if (nr) {
                /*
                 * Nasty deadlock avoidance.
                 *
                 * ext2_new_block->getblk->GFP->shrink_dcache_memory->
                 * prune_dcache->prune_one_dentry->dput->dentry_iput->iput->
                 * inode->i_sb->s_op->put_inode->ext2_discard_prealloc->
                 * ext2_free_blocks->lock_super->DEADLOCK.
                 *
                 * We should make sure we don't hold the superblock lock over
                 * block allocations, but for now:
                 */
                if (gfp_mask & __GFP_FS)
                        prune_dcache(nr);
        }
        return dentry_stat.nr_unused;
}
#define NAME_ALLOC_LEN(len)     ((len+16) & ~15)
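/*
 * NAME_ALLOC_LEN() rounds an external name up to a 16-byte granule while
 * always leaving room for the trailing NUL.  Worked examples:
 *
 *      NAME_ALLOC_LEN(1)  = (1+16)  & ~15 = 16
 *      NAME_ALLOC_LEN(15) = (15+16) & ~15 = 16
 *      NAME_ALLOC_LEN(16) = (16+16) & ~15 = 32
 *      NAME_ALLOC_LEN(31) = (31+16) & ~15 = 32
 */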
/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry * d_alloc(struct dentry * parent, const struct qstr *name)
{
        char * str;
        struct dentry *dentry;
        struct qstr * qstr;

        dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
        if (!dentry)
                return NULL;

        if (name->len > DNAME_INLINE_LEN-1) {
                qstr = kmalloc(sizeof(*qstr) + NAME_ALLOC_LEN(name->len),
                                GFP_KERNEL);
                if (!qstr) {
                        kmem_cache_free(dentry_cache, dentry);
                        return NULL;
                }
                qstr->name = qstr->name_str;
                qstr->len = name->len;
                qstr->hash = name->hash;
                dentry->d_qstr = qstr;
                str = qstr->name_str;
        } else {
                dentry->d_qstr = &dentry->d_name;
                str = dentry->d_iname;
        }

        memcpy(str, name->name, name->len);
        str[name->len] = 0;

        atomic_set(&dentry->d_count, 1);
        dentry->d_vfs_flags = DCACHE_UNHASHED;
        dentry->d_lock = SPIN_LOCK_UNLOCKED;
        dentry->d_flags = 0;
        dentry->d_inode = NULL;
        dentry->d_parent = NULL;
        dentry->d_move_count = 0;
        dentry->d_sb = NULL;
        dentry->d_name.name = str;
        dentry->d_name.len = name->len;
        dentry->d_name.hash = name->hash;
        dentry->d_op = NULL;
        dentry->d_fsdata = NULL;
        dentry->d_mounted = 0;
        dentry->d_cookie = NULL;
        dentry->d_bucket = NULL;
        INIT_HLIST_NODE(&dentry->d_hash);
        INIT_LIST_HEAD(&dentry->d_lru);
        INIT_LIST_HEAD(&dentry->d_subdirs);
        INIT_LIST_HEAD(&dentry->d_alias);

        if (parent) {
                dentry->d_parent = dget(parent);
                dentry->d_sb = parent->d_sb;
        } else
                INIT_LIST_HEAD(&dentry->d_child);

        spin_lock(&dcache_lock);
        if (parent)
                list_add(&dentry->d_child, &parent->d_subdirs);
        dentry_stat.nr_dentry++;
        spin_unlock(&dcache_lock);

        return dentry;
}
/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
        if (!list_empty(&entry->d_alias))
                BUG();
        spin_lock(&dcache_lock);
        if (inode)
                list_add(&entry->d_alias, &inode->i_dentry);
        entry->d_inode = inode;
        spin_unlock(&dcache_lock);
        security_d_instantiate(entry, inode);
}
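/*
 * Usage sketch (illustrative only): d_instantiate() is one half of the
 * common d_add() helper from <linux/dcache.h>, which attaches an inode
 * and then hashes the dentry:
 *
 *      d_add(dentry, inode);
 * is equivalent to
 *      d_instantiate(dentry, inode);
 *      d_rehash(dentry);
 *
 * Passing a NULL inode makes the dentry negative, which is how a
 * filesystem caches a successful "name does not exist" lookup.
 */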
/**
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated and returned. %NULL is returned if there is insufficient
 * memory or the inode passed is %NULL.
 */
struct dentry * d_alloc_root(struct inode * root_inode)
{
        struct dentry *res = NULL;

        if (root_inode) {
                static const struct qstr name = { .name = "/", .len = 1, .hash = 0 };
                res = d_alloc(NULL, &name);
                if (res) {
                        res->d_sb = root_inode->i_sb;
                        res->d_parent = res;
                        d_instantiate(res, root_inode);
                }
        }
        return res;
}
static inline struct hlist_head * d_hash(struct dentry * parent, unsigned long hash)
{
        hash += (unsigned long) parent / L1_CACHE_BYTES;
        hash = hash ^ (hash >> D_HASHBITS);
        return dentry_hashtable + (hash & D_HASHMASK);
}
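/*
 * Worked example (illustrative only), assuming d_hash_shift == 14,
 * d_hash_mask == 0x3fff (a 16384-bucket table) and L1_CACHE_BYTES == 32:
 * for a parent dentry at address 0xc12345e0 and a name hash of 0x0badcafe,
 *
 *      hash  = 0x0badcafe + 0xc12345e0/32      // salt with the parent
 *      hash ^= hash >> 14                      // fold high bits down
 *      bucket = dentry_hashtable + (hash & 0x3fff);
 *
 * so two entries share a bucket only when their parent-salted, folded
 * hashes agree in the low D_HASHBITS bits.
 */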
/**
 * d_alloc_anon - allocate an anonymous dentry
 * @inode: inode to allocate the dentry for
 *
 * This is similar to d_alloc_root. It is used by filesystems when
 * creating a dentry for a given inode, often in the process of
 * mapping a filehandle to a dentry. The returned dentry may be
 * anonymous, or may have a full name (if the inode was already
 * in the cache). The file system may need to make further
 * efforts to connect this dentry into the dcache properly.
 *
 * When called on a directory inode, we must ensure that
 * the inode only ever has one dentry. If a dentry is
 * found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. If %NULL is returned (indicating kmalloc failure),
 * the reference on the inode has not been released.
 */
struct dentry * d_alloc_anon(struct inode *inode)
{
        static const struct qstr anonstring = { "", 0, 0};
        struct dentry *tmp;
        struct dentry *res;

        if ((res = d_find_alias(inode))) {
                iput(inode);
                return res;
        }

        tmp = d_alloc(NULL, &anonstring);
        if (!tmp)
                return NULL;

        tmp->d_parent = tmp; /* make sure dput doesn't croak */

        spin_lock(&dcache_lock);
        if (S_ISDIR(inode->i_mode) && !list_empty(&inode->i_dentry)) {
                /* A directory can only have one dentry.
                 * This (now) has one, so use it.
                 */
                res = list_entry(inode->i_dentry.next, struct dentry, d_alias);
                __dget_locked(res);
        } else {
                /* attach a disconnected dentry */
                res = tmp;
                tmp = NULL;
                if (res) {
                        spin_lock(&res->d_lock);
                        res->d_sb = inode->i_sb;
                        res->d_parent = res;
                        res->d_inode = inode;
                        res->d_bucket = d_hash(res, res->d_name.hash);
                        res->d_flags |= DCACHE_DISCONNECTED;
                        res->d_vfs_flags &= ~DCACHE_UNHASHED;
                        list_add(&res->d_alias, &inode->i_dentry);
                        hlist_add_head(&res->d_hash, &inode->i_sb->s_anon);
                        spin_unlock(&res->d_lock);
                }
                inode = NULL; /* don't drop reference */
        }
        spin_unlock(&dcache_lock);

        if (inode)
                iput(inode);
        if (tmp)
                dput(tmp);
        return res;
}
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is (will be) needed in the lookup routine of any filesystem that is
 * exportable (via knfsd) so that we can build dcache paths to directories
 * effectively.
 *
 * If a dentry was found and moved, then it is returned. Otherwise NULL
 * is returned. This matches the expected return value of ->lookup.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
        struct dentry *new = NULL;

        if (inode && S_ISDIR(inode->i_mode)) {
                spin_lock(&dcache_lock);
                if (!list_empty(&inode->i_dentry)) {
                        new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
                        __dget_locked(new);
                        spin_unlock(&dcache_lock);
                        security_d_instantiate(new, inode);
                        d_rehash(dentry);
                        d_move(new, dentry);
                        iput(inode);
                } else {
                        /* d_instantiate takes dcache_lock, so we do it by hand */
                        list_add(&dentry->d_alias, &inode->i_dentry);
                        dentry->d_inode = inode;
                        spin_unlock(&dcache_lock);
                        security_d_instantiate(dentry, inode);
                        d_rehash(dentry);
                }
        } else
                d_add(dentry, inode);
        return new;
}
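/*
 * Usage sketch (illustrative only): in an exportable filesystem the tail
 * of ->lookup typically becomes
 *
 *      inode = my_fs_iget(dir->i_sb, ino);     // hypothetical helper
 *      return d_splice_alias(inode, dentry);
 *
 * so a directory first reached through the NFS file handle path
 * (d_alloc_anon above) gets reconnected under its real name here,
 * instead of growing a second alias.
 */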
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 *
 * Searches the children of the parent dentry for the name in question. If
 * the dentry is found its reference count is incremented and the dentry
 * is returned. The caller must use dput() to free the entry when it has
 * finished using it. %NULL is returned on failure.
 *
 * __d_lookup is dcache_lock free. The hash list is protected using RCU.
 * Memory barriers are used while updating and doing lockless traversal.
 * To avoid races with d_move while rename is happening, d_move_count is
 * used.
 *
 * Overflows in memcmp(), while d_move, are avoided by keeping the length
 * and name pointer in one structure pointed by d_qstr.
 *
 * rcu_read_lock() and rcu_read_unlock() are used to disable preemption while
 * lookup is going on.
 *
 * d_lru list is not updated, which can leave non-zero d_count dentries
 * around in the d_lru list.
 *
 * d_lookup() is protected against concurrent renames in some unrelated
 * directory using the seqlock_t rename_lock.
 */
struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
{
        struct dentry * dentry = NULL;
        unsigned long seq;

        do {
                seq = read_seqbegin(&rename_lock);
                dentry = __d_lookup(parent, name);
                if (dentry)
                        break;
        } while (read_seqretry(&rename_lock, seq));
        return dentry;
}
struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
{
        unsigned int len = name->len;
        unsigned int hash = name->hash;
        const unsigned char *str = name->name;
        struct hlist_head *head = d_hash(parent,hash);
        struct dentry *found = NULL;
        struct hlist_node *node;

        rcu_read_lock();

        hlist_for_each (node, head) {
                struct dentry *dentry;
                unsigned long move_count;
                struct qstr * qstr;

                smp_read_barrier_depends();
                dentry = hlist_entry(node, struct dentry, d_hash);

                /* if lookup ends up in a different bucket
                 * due to concurrent rename, fail it
                 */
                if (unlikely(dentry->d_bucket != head))
                        break;

                /*
                 * We must take a snapshot of d_move_count followed by
                 * read memory barrier before any search key comparison
                 */
                move_count = dentry->d_move_count;
                smp_rmb();

                if (dentry->d_name.hash != hash)
                        continue;
                if (dentry->d_parent != parent)
                        continue;

                qstr = dentry->d_qstr;
                smp_read_barrier_depends();
                if (parent->d_op && parent->d_op->d_compare) {
                        if (parent->d_op->d_compare(parent, qstr, name))
                                continue;
                } else {
                        if (qstr->len != len)
                                continue;
                        if (memcmp(qstr->name, str, len))
                                continue;
                }
                spin_lock(&dentry->d_lock);
                /*
                 * If dentry is moved, fail the lookup
                 */
                if (likely(move_count == dentry->d_move_count)) {
                        if (!d_unhashed(dentry)) {
                                atomic_inc(&dentry->d_count);
                                found = dentry;
                        }
                }
                spin_unlock(&dentry->d_lock);
                break;
        }
        rcu_read_unlock();

        return found;
}
/**
 * d_validate - verify dentry provided from insecure source
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 * @hash: Hash of the dentry
 * @len: Length of the name
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 */
int d_validate(struct dentry *dentry, struct dentry *dparent)
{
        struct hlist_head *base;
        struct hlist_node *lhp;

        /* Check whether the ptr might be valid at all.. */
        if (!kmem_ptr_validate(dentry_cache, dentry))
                goto out;

        if (dentry->d_parent != dparent)
                goto out;

        spin_lock(&dcache_lock);
        base = d_hash(dparent, dentry->d_name.hash);
        hlist_for_each(lhp,base) {
                /* read_barrier_depends() not required for d_hash list
                 * as it is parsed under dcache_lock
                 */
                if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
                        __dget_locked(dentry);
                        spin_unlock(&dcache_lock);
                        return 1;
                }
        }
        spin_unlock(&dcache_lock);
out:
        return 0;
}
/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users.
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later.
 */
void d_delete(struct dentry * dentry)
{
        /*
         * Are we the only user?
         */
        spin_lock(&dcache_lock);
        spin_lock(&dentry->d_lock);
        if (atomic_read(&dentry->d_count) == 1) {
                dentry_iput(dentry);
                return;
        }

        if (!d_unhashed(dentry))
                __d_drop(dentry);

        spin_unlock(&dentry->d_lock);
        spin_unlock(&dcache_lock);
}
/**
 * d_rehash - add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry * entry)
{
        struct hlist_head *list = d_hash(entry->d_parent, entry->d_name.hash);

        spin_lock(&dcache_lock);
        entry->d_vfs_flags &= ~DCACHE_UNHASHED;
        entry->d_bucket = list;
        hlist_add_head_rcu(&entry->d_hash, list);
        spin_unlock(&dcache_lock);
}
#define do_switch(x,y) do { \
        __typeof__ (x) __tmp = x; \
        x = y; y = __tmp; } while (0)
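/*
 * do_switch() is a type-generic swap; after
 *
 *      int a = 1, b = 2;
 *      do_switch(a, b);
 *
 * a == 2 and b == 1.  It is used in d_move() below to exchange the name
 * length, hash and parent of the two dentries involved in a rename.
 */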
/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch.
 *
 * Note that we have to be a lot more careful about getting the hash
 * switched - we have to switch the hash value properly even if it
 * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on..
 */
static inline void switch_names(struct dentry * dentry, struct dentry * target)
{
        const unsigned char *old_name, *new_name;
        struct qstr *old_qstr, *new_qstr;

        memcpy(dentry->d_iname, target->d_iname, DNAME_INLINE_LEN);
        old_qstr = target->d_qstr;
        old_name = target->d_name.name;
        new_qstr = dentry->d_qstr;
        new_name = dentry->d_name.name;
        if (old_name == target->d_iname) {
                old_name = dentry->d_iname;
                old_qstr = &dentry->d_name;
        }
        if (new_name == dentry->d_iname) {
                new_name = target->d_iname;
                new_qstr = &target->d_name;
        }
        target->d_name.name = new_name;
        dentry->d_name.name = old_name;
        target->d_qstr = new_qstr;
        dentry->d_qstr = old_qstr;
}
/*
 * We cannibalize "target" when moving dentry on top of it,
 * because it's going to be thrown away anyway. We could be more
 * polite about it, though.
 *
 * This forceful removal will result in ugly /proc output if
 * somebody holds a file open that got deleted due to a rename.
 * We could be nicer about the deleted file, and let it show
 * up under the name it got deleted rather than the name that
 * deleted it.
 */
/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way.
 */
void d_move(struct dentry * dentry, struct dentry * target)
{
        if (!dentry->d_inode)
                printk(KERN_WARNING "VFS: moving negative dcache entry\n");

        spin_lock(&dcache_lock);
        write_seqlock(&rename_lock);
        /*
         * XXXX: do we really need to take target->d_lock?
         */
        if (target < dentry) {
                spin_lock(&target->d_lock);
                spin_lock(&dentry->d_lock);
        } else {
                spin_lock(&dentry->d_lock);
                spin_lock(&target->d_lock);
        }

        /* Move the dentry to the target hash queue, if on different bucket */
        if (dentry->d_vfs_flags & DCACHE_UNHASHED)
                goto already_unhashed;
        if (dentry->d_bucket != target->d_bucket) {
                hlist_del_rcu(&dentry->d_hash);
already_unhashed:
                dentry->d_bucket = target->d_bucket;
                hlist_add_head_rcu(&dentry->d_hash, target->d_bucket);
                dentry->d_vfs_flags &= ~DCACHE_UNHASHED;
        }

        /* Unhash the target: dput() will then get rid of it */
        __d_drop(target);

        list_del(&dentry->d_child);
        list_del(&target->d_child);

        /* Switch the names.. */
        switch_names(dentry, target);
        smp_wmb();
        do_switch(dentry->d_name.len, target->d_name.len);
        do_switch(dentry->d_name.hash, target->d_name.hash);

        /* ... and switch the parents */
        if (IS_ROOT(dentry)) {
                dentry->d_parent = target->d_parent;
                target->d_parent = target;
                INIT_LIST_HEAD(&target->d_child);
        } else {
                do_switch(dentry->d_parent, target->d_parent);

                /* And add them back to the (new) parent lists */
                list_add(&target->d_child, &target->d_parent->d_subdirs);
        }

        list_add(&dentry->d_child, &dentry->d_parent->d_subdirs);
        dentry->d_move_count++;
        spin_unlock(&target->d_lock);
        spin_unlock(&dentry->d_lock);
        write_sequnlock(&rename_lock);
        spin_unlock(&dcache_lock);
}
/**
 * d_path - return the path of a dentry
 * @dentry: dentry to report
 * @vfsmnt: vfsmnt to which the dentry belongs
 * @root: root dentry
 * @rootmnt: vfsmnt to which the root dentry belongs
 * @buffer: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns the buffer or an error code if the path was too long.
 *
 * "buflen" should be positive. Caller holds the dcache_lock.
 */
static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
                        struct dentry *root, struct vfsmount *rootmnt,
                        char *buffer, int buflen)
{
        char * end = buffer+buflen;
        char * retval;
        int namelen;

        *--end = '\0';
        buflen--;
        if (!IS_ROOT(dentry) && d_unhashed(dentry)) {
                buflen -= 10;
                end -= 10;
                if (buflen < 0)
                        goto Elong;
                memcpy(end, " (deleted)", 10);
        }

        /* Get '/' right */
        retval = end-1;
        *retval = '/';

        for (;;) {
                struct dentry * parent;

                if (dentry == root && vfsmnt == rootmnt)
                        break;
                if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
                        /* Global root? */
                        spin_lock(&vfsmount_lock);
                        if (vfsmnt->mnt_parent == vfsmnt) {
                                spin_unlock(&vfsmount_lock);
                                goto global_root;
                        }
                        dentry = vfsmnt->mnt_mountpoint;
                        vfsmnt = vfsmnt->mnt_parent;
                        spin_unlock(&vfsmount_lock);
                        continue;
                }
                parent = dentry->d_parent;
                prefetch(parent);
                namelen = dentry->d_name.len;
                buflen -= namelen + 1;
                if (buflen < 0)
                        goto Elong;
                end -= namelen;
                memcpy(end, dentry->d_name.name, namelen);
                *--end = '/';
                retval = end;
                dentry = parent;
        }

        return retval;

global_root:
        namelen = dentry->d_name.len;
        buflen -= namelen;
        if (buflen < 0)
                goto Elong;
        retval -= namelen-1;    /* hit the slash */
        memcpy(retval, dentry->d_name.name, namelen);
        return retval;
Elong:
        return ERR_PTR(-ENAMETOOLONG);
}
/* write full pathname into buffer and return start of pathname */
char * d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
                char *buf, int buflen)
{
        char *res;
        struct vfsmount *rootmnt;
        struct dentry *root;

        read_lock(&current->fs->lock);
        rootmnt = mntget(current->fs->rootmnt);
        root = dget(current->fs->root);
        read_unlock(&current->fs->lock);
        spin_lock(&dcache_lock);
        res = __d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
        spin_unlock(&dcache_lock);
        dput(root);
        mntput(rootmnt);
        return res;
}
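/*
 * Usage sketch (illustrative only): callers hand in a scratch page and
 * must check for ERR_PTR(-ENAMETOOLONG), since the name is built from
 * the tail of the buffer backwards.  "dentry" and "mnt" stand for
 * whatever objects the caller already holds references to.
 *
 *      char *page = (char *) __get_free_page(GFP_KERNEL);
 *      if (page) {
 *              char *p = d_path(dentry, mnt, page, PAGE_SIZE);
 *              if (!IS_ERR(p))
 *                      printk("%s\n", p);
 *              free_page((unsigned long) page);
 *      }
 */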
/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *      char *getcwd(char * buf, size_t size)
 *      {
 *              int retval;
 *
 *              retval = sys_getcwd(buf, size);
 *              if (retval >= 0)
 *                      return buf;
 *              errno = -retval;
 *              return NULL;
 *      }
 */
asmlinkage long sys_getcwd(char __user *buf, unsigned long size)
{
        int error;
        struct vfsmount *pwdmnt, *rootmnt;
        struct dentry *pwd, *root;
        char *page = (char *) __get_free_page(GFP_USER);

        if (!page)
                return -ENOMEM;

        read_lock(&current->fs->lock);
        pwdmnt = mntget(current->fs->pwdmnt);
        pwd = dget(current->fs->pwd);
        rootmnt = mntget(current->fs->rootmnt);
        root = dget(current->fs->root);
        read_unlock(&current->fs->lock);

        error = -ENOENT;
        /* Has the current directory been unlinked? */
        spin_lock(&dcache_lock);
        if (pwd->d_parent == pwd || !d_unhashed(pwd)) {
                unsigned long len;
                char * cwd;

                cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE);
                spin_unlock(&dcache_lock);

                error = PTR_ERR(cwd);
                if (IS_ERR(cwd))
                        goto out;

                error = -ERANGE;
                len = PAGE_SIZE + page - cwd;
                if (len <= size) {
                        error = len;
                        if (copy_to_user(buf, cwd, len))
                                error = -EFAULT;
                }
        } else
                spin_unlock(&dcache_lock);

out:
        dput(pwd);
        mntput(pwdmnt);
        dput(root);
        mntput(rootmnt);
        free_page((unsigned long) page);
        return error;
}
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
 * Returns 0 otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */
int is_subdir(struct dentry * new_dentry, struct dentry * old_dentry)
{
        int result;
        struct dentry * saved = new_dentry;
        unsigned long seq;

        result = 0;
        /* need rcu_readlock to protect against the d_parent trashing due to
         * d_move
         */
        rcu_read_lock();
        do {
                /* for restarting inner loop in case of seq retry */
                new_dentry = saved;
                seq = read_seqbegin(&rename_lock);
                for (;;) {
                        if (new_dentry != old_dentry) {
                                struct dentry * parent = new_dentry->d_parent;
                                if (parent == new_dentry)
                                        break;
                                new_dentry = parent;
                                continue;
                        }
                        result = 1;
                        break;
                }
        } while (read_seqretry(&rename_lock, seq));
        rcu_read_unlock();

        return result;
}
void d_genocide(struct dentry *root)
{
        struct dentry *this_parent = root;
        struct list_head *next;

        spin_lock(&dcache_lock);
repeat:
        next = this_parent->d_subdirs.next;
resume:
        while (next != &this_parent->d_subdirs) {
                struct list_head *tmp = next;
                struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
                next = tmp->next;
                if (d_unhashed(dentry)||!dentry->d_inode)
                        continue;
                if (!list_empty(&dentry->d_subdirs)) {
                        this_parent = dentry;
                        goto repeat;
                }
                atomic_dec(&dentry->d_count);
        }
        if (this_parent != root) {
                next = this_parent->d_child.next;
                atomic_dec(&this_parent->d_count);
                this_parent = this_parent->d_parent;
                goto resume;
        }
        spin_unlock(&dcache_lock);
}
/**
 * find_inode_number - check for dentry with name
 * @dir: directory to check
 * @name: Name to find.
 *
 * Check whether a dentry already exists for the given name,
 * and return the inode number if it has an inode. Otherwise
 * 0 is returned.
 *
 * This routine is used to post-process directory listings for
 * filesystems using synthetic inode numbers, and is necessary
 * to keep getcwd() working.
 */
ino_t find_inode_number(struct dentry *dir, struct qstr *name)
{
        struct dentry * dentry;
        ino_t ino = 0;

        /*
         * Check for a fs-specific hash function. Note that we must
         * calculate the standard hash first, as the d_op->d_hash()
         * routine may choose to leave the hash value unchanged.
         */
        name->hash = full_name_hash(name->name, name->len);
        if (dir->d_op && dir->d_op->d_hash) {
                if (dir->d_op->d_hash(dir, name) != 0)
                        goto out;
        }

        dentry = d_lookup(dir, name);
        if (dentry) {
                if (dentry->d_inode)
                        ino = dentry->d_inode->i_ino;
                dput(dentry);
        }
out:
        return ino;
}
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
        if (!str)
                return 0;
        dhash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("dhash_entries=", set_dhash_entries);
static void __init dcache_init(unsigned long mempages)
{
        struct hlist_head *d;
        unsigned long order;
        unsigned int nr_hash;
        int i;

        /*
         * A constructor could be added for stable state like the lists,
         * but it is probably not worth it because of the cache nature
         * of the dcache.
         * If fragmentation is too bad then the SLAB_HWCACHE_ALIGN
         * flag could be removed here, to hint to the allocator that
         * it should not try to get multiple page regions.
         */
        dentry_cache = kmem_cache_create("dentry_cache",
                                         sizeof(struct dentry),
                                         0,
                                         SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT,
                                         NULL, NULL);
        if (!dentry_cache)
                panic("Cannot create dentry cache");

        set_shrinker(DEFAULT_SEEKS, shrink_dcache_memory);

        if (!dhash_entries)
                dhash_entries = PAGE_SHIFT < 13 ?
                                mempages >> (13 - PAGE_SHIFT) :
                                mempages << (PAGE_SHIFT - 13);

        dhash_entries *= sizeof(struct hlist_head);
        for (order = 0; ((1UL << order) << PAGE_SHIFT) < dhash_entries; order++)
                ;

        do {
                unsigned long tmp;

                nr_hash = (1UL << order) * PAGE_SIZE /
                        sizeof(struct hlist_head);
                d_hash_mask = (nr_hash - 1);

                tmp = nr_hash;
                d_hash_shift = 0;
                while ((tmp >>= 1UL) != 0UL)
                        d_hash_shift++;

                dentry_hashtable = (struct hlist_head *)
                        __get_free_pages(GFP_ATOMIC, order);
        } while (dentry_hashtable == NULL && --order >= 0);

        printk(KERN_INFO "Dentry cache hash table entries: %d (order: %ld, %ld bytes)\n",
                        nr_hash, order, (PAGE_SIZE << order));

        if (!dentry_hashtable)
                panic("Failed to allocate dcache hash table\n");

        d = dentry_hashtable;
        i = nr_hash;
        do {
                INIT_HLIST_HEAD(d);
                d++;
                i--;
        } while (i);
}
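/*
 * Sizing example (illustrative only), assuming 4 KB pages
 * (PAGE_SHIFT == 12), 4-byte hlist heads and mempages == 32768
 * (i.e. 128 MB of memory), with no dhash_entries= override:
 *
 *      dhash_entries = 32768 >> (13 - 12)        = 16384 entries
 *                    * sizeof(struct hlist_head) = 65536 bytes
 *      order = 4   (16 pages of 4096 bytes = 65536 bytes)
 *      nr_hash = 16384, d_hash_mask = 0x3fff, d_hash_shift = 14
 */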
/* SLAB cache for __getname() consumers */
kmem_cache_t *names_cachep;

/* SLAB cache for file structures */
kmem_cache_t *filp_cachep;

EXPORT_SYMBOL(d_genocide);
extern void bdev_cache_init(void);
extern void chrdev_init(void);

void __init vfs_caches_init(unsigned long mempages)
{
        names_cachep = kmem_cache_create("names_cache",
                        PATH_MAX, 0,
                        SLAB_HWCACHE_ALIGN, NULL, NULL);
        if (!names_cachep)
                panic("Cannot create names SLAB cache");

        filp_cachep = kmem_cache_create("filp",
                        sizeof(struct file), 0,
                        SLAB_HWCACHE_ALIGN, filp_ctor, filp_dtor);
        if (!filp_cachep)
                panic("Cannot create filp SLAB cache");

        dcache_init(mempages);
        inode_init(mempages);
        files_init(mempages);
        mnt_init(mempages);
        bdev_cache_init();
        chrdev_init();
}
EXPORT_SYMBOL(d_alloc);
EXPORT_SYMBOL(d_alloc_anon);
EXPORT_SYMBOL(d_alloc_root);
EXPORT_SYMBOL(d_delete);
EXPORT_SYMBOL(d_find_alias);
EXPORT_SYMBOL(d_instantiate);
EXPORT_SYMBOL(d_invalidate);
EXPORT_SYMBOL(d_lookup);
EXPORT_SYMBOL(d_move);
EXPORT_SYMBOL(d_path);
EXPORT_SYMBOL(d_prune_aliases);
EXPORT_SYMBOL(d_rehash);
EXPORT_SYMBOL(d_splice_alias);
EXPORT_SYMBOL(d_validate);
EXPORT_SYMBOL(dget_locked);
EXPORT_SYMBOL(dput);
EXPORT_SYMBOL(find_inode_number);
EXPORT_SYMBOL(have_submounts);
EXPORT_SYMBOL(is_subdir);
EXPORT_SYMBOL(names_cachep);
EXPORT_SYMBOL(shrink_dcache_anon);
EXPORT_SYMBOL(shrink_dcache_parent);
EXPORT_SYMBOL(shrink_dcache_sb);