4 * Complete reimplementation
5 * (C) 1997 Thomas Schoebel-Theuer,
6 * with heavy changes by Linus Torvalds
10 * Notes on the allocation strategy:
12 * The dcache is a master of the icache - whenever a dcache entry
13 * exists, the inode will always exist. "iput()" is done either when
14 * the dcache entry is deleted or garbage collected.
17 #include <linux/string.h>
20 #include <linux/malloc.h>
21 #include <linux/slab.h>
22 #include <linux/init.h>
23 #include <linux/smp_lock.h>
24 #include <linux/cache.h>
26 #include <asm/uaccess.h>
28 #define DCACHE_PARANOIA 1
29 /* #define DCACHE_DEBUG 1 */
/*
 * dcache_lock protects the dentry hash chains, the LRU ("unused")
 * list, the d_alias/d_child lists and the dentry_stat counters.
 */
31 spinlock_t dcache_lock
= SPIN_LOCK_UNLOCKED
;
33 /* Right now the dcache depends on the kernel lock */
34 #define check_lock() if (!kernel_locked()) BUG()
/* Slab cache from which all struct dentry objects are allocated. */
36 static kmem_cache_t
*dentry_cache
;
39 * This is the single most critical data structure when it comes
40 * to the dcache: the hashtable for lookups. Somebody should try
41 * to make this good - I've just made it work.
43 * This hash-function tries to avoid losing too many bits of hash
44 * information, yet avoid using a prime hash-size or similar.
46 #define D_HASHBITS d_hash_shift
47 #define D_HASHMASK d_hash_mask
49 static unsigned int d_hash_mask
;
50 static unsigned int d_hash_shift
;
51 static struct list_head
*dentry_hashtable
;
/* LRU list of dentries with zero reference count, oldest at the tail. */
52 static LIST_HEAD(dentry_unused
);
57 int age_limit
; /* age in seconds */
58 int want_pages
; /* pages requested by system */
/* NOTE(review): the opening of this statistics struct is missing from this extraction. */
60 } dentry_stat
= {0, 0, 45, 0,};
62 /* no dcache_lock, please */
/*
 * d_free - release the memory backing a dentry.
 * Runs the filesystem's d_release hook (if any), frees an
 * externally-allocated name, returns the object to the slab
 * cache and decrements the global dentry count.
 */
63 static inline void d_free(struct dentry
*dentry
)
65 if (dentry
->d_op
&& dentry
->d_op
->d_release
)
66 dentry
->d_op
->d_release(dentry
);
/* Names too long for the inline buffer were kmalloc'ed separately. */
67 if (dname_external(dentry
))
68 kfree(dentry
->d_name
.name
);
69 kmem_cache_free(dentry_cache
, dentry
);
70 dentry_stat
.nr_dentry
--;
74 * Release the dentry's inode, using the filesystem
75 * d_iput() operation if defined.
76 * Called with dcache_lock held, drops it.
78 static inline void dentry_iput(struct dentry
* dentry
)
80 struct inode
*inode
= dentry
->d_inode
;
/* Detach the inode and unlink from the inode's alias list while still locked. */
82 dentry
->d_inode
= NULL
;
83 list_del(&dentry
->d_alias
);
84 INIT_LIST_HEAD(&dentry
->d_alias
);
/* The lock is dropped before calling into the filesystem, which may block. */
85 spin_unlock(&dcache_lock
);
86 if (dentry
->d_op
&& dentry
->d_op
->d_iput
)
87 dentry
->d_op
->d_iput(dentry
, inode
);
91 spin_unlock(&dcache_lock
);
97 * This is complicated by the fact that we do not want to put
98 * dentries that are no longer on any hash chain on the unused
99 * list: we'd much rather just get rid of them immediately.
101 * However, that implies that we have to traverse the dentry
102 * tree upwards to the parents which might _also_ now be
103 * scheduled for deletion (it may have been only waiting for
104 * its last child to go away).
106 * This tail recursion is done by hand as we don't want to depend
107 * on the compiler to always get this right (gcc generally doesn't).
108 * Real recursion would eat up our stack space.
112 * dput - release a dentry
113 * @dentry: dentry to release
115 * Release a dentry. This will drop the usage count and if appropriate
116 * call the dentry unlink method as well as removing it from the queues and
117 * releasing its resources. If the parent dentries were scheduled for release
118 * they too may now get deleted.
120 * no dcache lock, please.
/* NOTE(review): several control-flow lines of this function are missing
 * from this extraction (labels, braces, the hand-rolled tail-recursion
 * loop); the fragments below must not be compiled as-is. */
123 void dput(struct dentry
*dentry
)
/* Fast path: only take dcache_lock when the count actually hits zero. */
129 if (!atomic_dec_and_lock(&dentry
->d_count
, &dcache_lock
))
132 /* dput on a free dentry? */
133 if (!list_empty(&dentry
->d_lru
))
136 * AV: ->d_delete() is _NOT_ allowed to block now.
138 if (dentry
->d_op
&& dentry
->d_op
->d_delete
) {
139 if (dentry
->d_op
->d_delete(dentry
))
142 /* Unreachable? Get rid of it */
/* Still hashed: park the dentry on the LRU list for later reclaim. */
143 if (list_empty(&dentry
->d_hash
))
145 list_add(&dentry
->d_lru
, &dentry_unused
);
146 dentry_stat
.nr_unused
++;
148 * Update the timestamp
150 dentry
->d_reftime
= jiffies
;
151 spin_unlock(&dcache_lock
);
/* Unhash, detach from the parent's child list and walk up the tree. */
155 list_del(&dentry
->d_hash
);
158 struct dentry
*parent
;
159 list_del(&dentry
->d_child
);
160 /* drops the lock, at that point nobody can reach this dentry */
162 parent
= dentry
->d_parent
;
/* A dentry that is its own parent is a root; stop ascending there. */
164 if (dentry
== parent
)
172 * d_invalidate - invalidate a dentry
173 * @dentry: dentry to invalidate
175 * Try to invalidate the dentry if it turns out to be
176 * possible. If there are other dentries that can be
177 * reached through this one we can't delete it and we
178 * return -EBUSY. On success we return 0.
183 int d_invalidate(struct dentry
* dentry
)
186 * If it's already been dropped, return OK.
188 spin_lock(&dcache_lock
);
/* An empty d_hash chain link means the dentry is already unhashed. */
189 if (list_empty(&dentry
->d_hash
)) {
190 spin_unlock(&dcache_lock
);
194 * Check whether to do a partial shrink_dcache
195 * to get rid of unused child entries.
197 if (!list_empty(&dentry
->d_subdirs
)) {
/* shrink_dcache_parent() may sleep, so the lock is dropped around it. */
198 spin_unlock(&dcache_lock
);
199 shrink_dcache_parent(dentry
);
200 spin_lock(&dcache_lock
);
204 * Somebody else still using it?
206 * If it's a directory, we can't drop it
207 * for fear of somebody re-populating it
208 * with children (even though dropping it
209 * would make it unreachable from the root,
210 * we might still populate it if it was a
211 * working directory or similar).
213 if (atomic_read(&dentry
->d_count
) > 1) {
214 if (dentry
->d_inode
&& S_ISDIR(dentry
->d_inode
->i_mode
)) {
215 spin_unlock(&dcache_lock
);
/* Unhash: re-initializing d_hash marks the dentry as unhashed. */
220 list_del(&dentry
->d_hash
);
221 INIT_LIST_HEAD(&dentry
->d_hash
);
222 spin_unlock(&dcache_lock
);
226 /* This should be called _only_ with dcache_lock held */
/*
 * __dget_locked - take a reference on a dentry, dcache_lock held.
 * If this is the first reference (count went 0 -> 1), the dentry is
 * resurrected: it is removed from the unused (LRU) list and the
 * unused counter is adjusted.
 */
228 static inline struct dentry
* __dget_locked(struct dentry
*dentry
)
230 atomic_inc(&dentry
->d_count
);
231 if (atomic_read(&dentry
->d_count
) == 1) {
232 dentry_stat
.nr_unused
--;
233 list_del(&dentry
->d_lru
);
234 INIT_LIST_HEAD(&dentry
->d_lru
); /* make "list_empty()" work */
/* Exported wrapper around __dget_locked(); caller holds dcache_lock. */
239 struct dentry
* dget_locked(struct dentry
*dentry
)
241 return __dget_locked(dentry
);
245 * d_find_alias - grab a hashed alias of inode
246 * @inode: inode in question
248 * If inode has a hashed alias - acquire the reference to alias and
249 * return it. Otherwise return NULL. Notice that if inode is a directory
250 * there can be only one alias and it can be unhashed only if it has
254 struct dentry
* d_find_alias(struct inode
*inode
)
256 struct list_head
*head
, *next
, *tmp
;
257 struct dentry
*alias
;
259 spin_lock(&dcache_lock
);
/* Walk the inode's i_dentry list looking for a still-hashed alias. */
260 head
= &inode
->i_dentry
;
261 next
= inode
->i_dentry
.next
;
262 while (next
!= head
) {
265 alias
= list_entry(tmp
, struct dentry
, d_alias
);
266 if (!list_empty(&alias
->d_hash
)) {
/* Found one: pin it under the lock before returning it. */
267 __dget_locked(alias
);
268 spin_unlock(&dcache_lock
);
272 spin_unlock(&dcache_lock
);
277 * Try to kill dentries associated with this inode.
278 * WARNING: you must own a reference to inode.
280 void d_prune_aliases(struct inode
*inode
)
282 struct list_head
*tmp
, *head
= &inode
->i_dentry
;
284 spin_lock(&dcache_lock
);
/* Scan every alias; unreferenced ones are candidates for pruning. */
286 while ((tmp
= tmp
->next
) != head
) {
287 struct dentry
*dentry
= list_entry(tmp
, struct dentry
, d_alias
);
288 if (!atomic_read(&dentry
->d_count
)) {
/* Pin the dentry before dropping the lock to work on it. */
289 __dget_locked(dentry
);
290 spin_unlock(&dcache_lock
);
296 spin_unlock(&dcache_lock
);
300 * Throw away a dentry - free the inode, dput the parent.
301 * This requires that the LRU list has already been
303 * Called with dcache_lock, drops it and then regains.
305 static inline void prune_one_dentry(struct dentry
* dentry
)
307 struct dentry
* parent
;
/* Detach from hash chain and from the parent's child list. */
309 list_del(&dentry
->d_hash
);
310 list_del(&dentry
->d_child
);
312 parent
= dentry
->d_parent
;
/* Roots are their own parent; only non-roots release a parent ref. */
314 if (parent
!= dentry
)
316 spin_lock(&dcache_lock
);
320 * prune_dcache - shrink the dcache
321 * @count: number of entries to try and free
323 * Shrink the dcache. This is done when we need
324 * more memory, or simply when we need to unmount
325 * something (at which point we need to unuse
328 * This function may fail to free any resources if
329 * all the dentries are in use.
332 void prune_dcache(int count
)
334 spin_lock(&dcache_lock
);
336 struct dentry
*dentry
;
337 struct list_head
*tmp
;
/* Take victims from the tail (oldest end) of the unused list. */
339 tmp
= dentry_unused
.prev
;
341 if (tmp
== &dentry_unused
)
343 dentry_stat
.nr_unused
--;
346 dentry
= list_entry(tmp
, struct dentry
, d_lru
);
348 /* Unused dentry with a count? */
349 if (atomic_read(&dentry
->d_count
))
352 prune_one_dentry(dentry
);
356 spin_unlock(&dcache_lock
);
360 * Shrink the dcache for the specified super block.
361 * This allows us to unmount a device without disturbing
362 * the dcache for the other devices.
364 * This implementation makes just two traversals of the
365 * unused list. On the first pass we move the selected
366 * dentries to the most recent end, and on the second
367 * pass we free them. The second pass must restart after
368 * each dput(), but since the target dentries are all at
369 * the end, it's really just a single traversal.
373 * shrink_dcache_sb - shrink dcache for a superblock
376 * Shrink the dcache for the specified super block. This
377 * is used to free the dcache before unmounting a file
381 void shrink_dcache_sb(struct super_block
* sb
)
383 struct list_head
*tmp
, *next
;
384 struct dentry
*dentry
;
387 * Pass one ... move the dentries for the specified
388 * superblock to the most recent end of the unused list.
390 spin_lock(&dcache_lock
);
391 next
= dentry_unused
.next
;
392 while (next
!= &dentry_unused
) {
395 dentry
= list_entry(tmp
, struct dentry
, d_lru
);
/* Skip dentries belonging to other superblocks. */
396 if (dentry
->d_sb
!= sb
)
/* Re-insert at the head so all sb dentries cluster at one end. */
399 list_add(tmp
, &dentry_unused
);
403 * Pass two ... free the dentries for this superblock.
406 next
= dentry_unused
.next
;
407 while (next
!= &dentry_unused
) {
410 dentry
= list_entry(tmp
, struct dentry
, d_lru
);
411 if (dentry
->d_sb
!= sb
)
/* A nonzero count here means someone grabbed it meanwhile; skip. */
413 if (atomic_read(&dentry
->d_count
))
415 dentry_stat
.nr_unused
--;
418 prune_one_dentry(dentry
);
421 spin_unlock(&dcache_lock
);
425 * Search for at least 1 mount point in the dentry's subdirs.
426 * We descend to the next level whenever the d_subdirs
427 * list is non-empty and continue searching.
431 * have_submounts - check for mounts over a dentry
432 * @parent: dentry to check.
434 * Return true if the parent or its subdirectories contain
438 int have_submounts(struct dentry
*parent
)
440 struct dentry
*this_parent
= parent
;
441 struct list_head
*next
;
443 spin_lock(&dcache_lock
);
444 if (d_mountpoint(parent
))
/* Iterative depth-first walk over the subtree via d_subdirs/d_child. */
447 next
= this_parent
->d_subdirs
.next
;
449 while (next
!= &this_parent
->d_subdirs
) {
450 struct list_head
*tmp
= next
;
451 struct dentry
*dentry
= list_entry(tmp
, struct dentry
, d_child
);
453 /* Have we found a mount point ? */
454 if (d_mountpoint(dentry
))
456 if (!list_empty(&dentry
->d_subdirs
)) {
457 this_parent
= dentry
;
462 * All done at this level ... ascend and resume the search.
464 if (this_parent
!= parent
) {
465 next
= this_parent
->d_child
.next
;
466 this_parent
= this_parent
->d_parent
;
469 spin_unlock(&dcache_lock
);
470 return 0; /* No mount points found in tree */
472 spin_unlock(&dcache_lock
);
477 * Search the dentry child list for the specified parent,
478 * and move any unused dentries to the end of the unused
479 * list for prune_dcache(). We descend to the next level
480 * whenever the d_subdirs list is non-empty and continue
/* Returns the number of unused dentries found (counter lines are
 * missing from this extraction). */
483 static int select_parent(struct dentry
* parent
)
485 struct dentry
*this_parent
= parent
;
486 struct list_head
*next
;
489 spin_lock(&dcache_lock
);
491 next
= this_parent
->d_subdirs
.next
;
493 while (next
!= &this_parent
->d_subdirs
) {
494 struct list_head
*tmp
= next
;
495 struct dentry
*dentry
= list_entry(tmp
, struct dentry
, d_child
);
/* Zero-count dentries are moved to the tail of the unused list
 * so prune_dcache() will reach them first. */
497 if (!atomic_read(&dentry
->d_count
)) {
498 list_del(&dentry
->d_lru
);
499 list_add(&dentry
->d_lru
, dentry_unused
.prev
);
503 * Descend a level if the d_subdirs list is non-empty.
505 if (!list_empty(&dentry
->d_subdirs
)) {
506 this_parent
= dentry
;
508 printk(KERN_DEBUG
"select_parent: descending to %s/%s, found=%d\n",
509 dentry
->d_parent
->d_name
.name
, dentry
->d_name
.name
, found
);
515 * All done at this level ... ascend and resume the search.
517 if (this_parent
!= parent
) {
518 next
= this_parent
->d_child
.next
;
519 this_parent
= this_parent
->d_parent
;
521 printk(KERN_DEBUG
"select_parent: ascending to %s/%s, found=%d\n",
522 this_parent
->d_parent
->d_name
.name
, this_parent
->d_name
.name
, found
);
526 spin_unlock(&dcache_lock
);
531 * shrink_dcache_parent - prune dcache
532 * @parent: parent of entries to prune
534 * Prune the dcache to remove unused children of the parent dentry.
537 void shrink_dcache_parent(struct dentry
* parent
)
/* Repeat until select_parent() finds no more unused descendants. */
541 while ((found
= select_parent(parent
)) != 0)
546 * This is called from kswapd when we think we need some
547 * more memory, but aren't really sure how much. So we
548 * carefully try to free a _bit_ of our dcache, but not
552 * 0 - very urgent: shrink everything
554 * 6 - base-level: try to shrink a bit.
556 int shrink_dcache_memory(int priority
, unsigned int gfp_mask
)
/* NOTE(review): this divides by priority, yet 0 is documented as a
 * valid "very urgent" value — the guard against priority == 0 is
 * presumably on a line missing from this extraction; confirm. */
560 count
= dentry_stat
.nr_unused
/ priority
;
562 /* FIXME: kmem_cache_shrink here should tell us
563 the number of pages freed, and it should
564 work in a __GFP_DMA/__GFP_HIGHMEM behaviour
565 to free only the interesting pages in
566 function of the needs of the current allocation. */
567 kmem_cache_shrink(dentry_cache
);
/* Round a name length up to the next multiple of 16 for kmalloc. */
572 #define NAME_ALLOC_LEN(len) ((len+16) & ~15)
575 * d_alloc - allocate a dcache entry
576 * @parent: parent of entry to allocate
577 * @name: qstr of the name
579 * Allocates a dentry. It returns %NULL if there is insufficient memory
580 * available. On a success the dentry is returned. The name passed in is
581 * copied and the copy passed in may be reused after this call.
584 struct dentry
* d_alloc(struct dentry
* parent
, const struct qstr
*name
)
587 struct dentry
*dentry
;
589 dentry
= kmem_cache_alloc(dentry_cache
, GFP_KERNEL
);
/* Long names get a separate allocation; short ones use the inline
 * d_iname buffer inside the dentry itself. */
593 if (name
->len
> DNAME_INLINE_LEN
-1) {
594 str
= kmalloc(NAME_ALLOC_LEN(name
->len
), GFP_KERNEL
);
596 kmem_cache_free(dentry_cache
, dentry
);
600 str
= dentry
->d_iname
;
602 memcpy(str
, name
->name
, name
->len
);
/* Fresh dentry starts with a single reference held by the caller. */
605 atomic_set(&dentry
->d_count
, 1);
607 dentry
->d_inode
= NULL
;
608 dentry
->d_parent
= NULL
;
610 dentry
->d_name
.name
= str
;
611 dentry
->d_name
.len
= name
->len
;
612 dentry
->d_name
.hash
= name
->hash
;
614 dentry
->d_fsdata
= NULL
;
615 INIT_LIST_HEAD(&dentry
->d_vfsmnt
);
616 INIT_LIST_HEAD(&dentry
->d_hash
);
617 INIT_LIST_HEAD(&dentry
->d_lru
);
618 INIT_LIST_HEAD(&dentry
->d_subdirs
);
619 INIT_LIST_HEAD(&dentry
->d_alias
);
/* With a parent: pin it and link the new child under dcache_lock. */
621 dentry
->d_parent
= dget(parent
);
622 dentry
->d_sb
= parent
->d_sb
;
623 spin_lock(&dcache_lock
);
624 list_add(&dentry
->d_child
, &parent
->d_subdirs
);
625 spin_unlock(&dcache_lock
);
/* Parentless (root) dentries get an empty child link instead. */
627 INIT_LIST_HEAD(&dentry
->d_child
);
629 dentry_stat
.nr_dentry
++;
634 * d_instantiate - fill in inode information for a dentry
635 * @entry: dentry to complete
636 * @inode: inode to attach to this dentry
638 * Fill in inode information in the entry.
640 * This turns negative dentries into productive full members
643 * NOTE! This assumes that the inode count has been incremented
644 * (or otherwise set) by the caller to indicate that it is now
645 * in use by the dcache.
648 void d_instantiate(struct dentry
*entry
, struct inode
* inode
)
650 spin_lock(&dcache_lock
);
/* Link the dentry onto the inode's alias list and attach the inode. */
652 list_add(&entry
->d_alias
, &inode
->i_dentry
);
653 entry
->d_inode
= inode
;
654 spin_unlock(&dcache_lock
);
658 * d_alloc_root - allocate root dentry
659 * @root_inode: inode to allocate the root for
661 * Allocate a root ("/") dentry for the inode given. The inode is
662 * instantiated and returned. %NULL is returned if there is insufficient
663 * memory or the inode passed is %NULL.
666 struct dentry
* d_alloc_root(struct inode
* root_inode
)
668 struct dentry
*res
= NULL
;
/* Root dentry has no parent; name is the compound-literal qstr "/". */
671 res
= d_alloc(NULL
, &(const struct qstr
) { "/", 1, 0 });
/* d_alloc(NULL, ...) leaves d_sb unset, so take it from the inode. */
673 res
->d_sb
= root_inode
->i_sb
;
675 d_instantiate(res
, root_inode
);
/*
 * d_hash - map (parent, name-hash) to a hash-table bucket.
 * Mixes the parent pointer (scaled down by the cache-line size) into
 * the name hash, folds in shifted copies to spread the high bits,
 * and masks the result into the dentry_hashtable index range.
 */
681 static inline struct list_head
* d_hash(struct dentry
* parent
, unsigned long hash
)
683 hash
+= (unsigned long) parent
/ L1_CACHE_BYTES
;
684 hash
= hash
^ (hash
>> D_HASHBITS
) ^ (hash
>> D_HASHBITS
*2);
685 return dentry_hashtable
+ (hash
& D_HASHMASK
);
689 * d_lookup - search for a dentry
690 * @parent: parent dentry
691 * @name: qstr of name we wish to find
693 * Searches the children of the parent dentry for the name in question. If
694 * the dentry is found its reference count is incremented and the dentry
695 * is returned. The caller must use dput to free the entry when it has
696 * finished using it. %NULL is returned on failure.
699 struct dentry
* d_lookup(struct dentry
* parent
, struct qstr
* name
)
701 unsigned int len
= name
->len
;
702 unsigned int hash
= name
->hash
;
703 const unsigned char *str
= name
->name
;
704 struct list_head
*head
= d_hash(parent
,hash
);
705 struct list_head
*tmp
;
707 spin_lock(&dcache_lock
);
/* Walk the hash chain for this bucket. */
710 struct dentry
* dentry
= list_entry(tmp
, struct dentry
, d_hash
);
/* Cheap rejections first: hash value, then parent identity. */
714 if (dentry
->d_name
.hash
!= hash
)
716 if (dentry
->d_parent
!= parent
)
/* Filesystems may supply their own comparison (e.g. case folding). */
718 if (parent
->d_op
&& parent
->d_op
->d_compare
) {
719 if (parent
->d_op
->d_compare(parent
, &dentry
->d_name
, name
))
/* Default comparison: exact length and byte-wise match. */
722 if (dentry
->d_name
.len
!= len
)
724 if (memcmp(dentry
->d_name
.name
, str
, len
))
/* Match: pin the dentry under the lock before returning it. */
727 __dget_locked(dentry
);
728 spin_unlock(&dcache_lock
);
731 spin_unlock(&dcache_lock
);
736 * d_validate - verify dentry provided from insecure source
737 * @dentry: The dentry alleged to be valid
738 * @dparent: The parent dentry
739 * @hash: Hash of the dentry
740 * @len: Length of the name
742 * An insecure source has sent us a dentry, here we verify it and dget() it.
743 * This is used by ncpfs in its readdir implementation.
744 * Zero is returned if the dentry is invalid.
746 * NOTE: This function does _not_ dereference the pointers before we have
747 * validated them. We can test the pointer values, but we
748 * must not actually use them until we have found a valid
749 * copy of the pointer in kernel space..
752 int d_validate(struct dentry
*dentry
, struct dentry
*dparent
,
753 unsigned int hash
, unsigned int len
)
755 struct list_head
*base
, *lhp
;
758 spin_lock(&dcache_lock
);
759 if (dentry
!= dparent
) {
/* Ordinary case: prove the pointer by finding it on its hash chain. */
760 base
= d_hash(dparent
, hash
);
762 while ((lhp
= lhp
->next
) != base
) {
763 if (dentry
== list_entry(lhp
, struct dentry
, d_hash
)) {
764 __dget_locked(dentry
);
770 * Special case: local mount points don't live in
771 * the hashes, so we search the super blocks.
773 struct super_block
*sb
= sb_entry(super_blocks
.next
);
775 for (; sb
!= sb_entry(&super_blocks
);
776 sb
= sb_entry(sb
->s_list
.next
)) {
/* A self-parented dentry is valid only if it is some sb's root. */
779 if (sb
->s_root
== dentry
) {
780 __dget_locked(dentry
);
787 spin_unlock(&dcache_lock
);
792 * When a file is deleted, we have two options:
793 * - turn this dentry into a negative dentry
794 * - unhash this dentry and free it.
796 * Usually, we want to just turn this into
797 * a negative dentry, but if anybody else is
798 * currently using the dentry or the inode
799 * we can't do that and we fall back on removing
800 * it from the hash queues and waiting for
801 * it to be deleted later when it has no users
805 * d_delete - delete a dentry
806 * @dentry: The dentry to delete
808 * Turn the dentry into a negative dentry if possible, otherwise
809 * remove it from the hash queues so it can be deleted later
812 void d_delete(struct dentry
* dentry
)
815 * Are we the only user?
817 spin_lock(&dcache_lock
);
/* Sole user: the dentry can be turned negative in place. */
818 if (atomic_read(&dentry
->d_count
) == 1) {
822 spin_unlock(&dcache_lock
);
825 * If not, just drop the dentry and let dput
832 * d_rehash - add an entry back to the hash
833 * @entry: dentry to add to the hash
835 * Adds a dentry to the hash according to its name.
838 void d_rehash(struct dentry
* entry
)
/* Compute the bucket from the parent pointer and the name hash. */
840 struct list_head
*list
= d_hash(entry
->d_parent
, entry
->d_name
.hash
);
841 spin_lock(&dcache_lock
);
842 list_add(&entry
->d_hash
, list
);
843 spin_unlock(&dcache_lock
);
/* Swap two lvalues of the same type via a gcc __typeof__ temporary. */
846 #define do_switch(x,y) do { \
847 __typeof__ (x) __tmp = x; \
848 x = y; y = __tmp; } while (0)
851 * When switching names, the actual string doesn't strictly have to
852 * be preserved in the target - because we're dropping the target
853 * anyway. As such, we can just do a simple memcpy() to copy over
854 * the new name before we switch.
856 * Note that we have to be a lot more careful about getting the hash
857 * switched - we have to switch the hash value properly even if it
858 * then no longer matches the actual (corrupted) string of the target.
859 * The hash value has to match the hash queue that the dentry is on..
861 static inline void switch_names(struct dentry
* dentry
, struct dentry
* target
)
863 const unsigned char *old_name
, *new_name
;
/* Clobber target's inline name with dentry's; target is doomed anyway. */
866 memcpy(dentry
->d_iname
, target
->d_iname
, DNAME_INLINE_LEN
);
867 old_name
= target
->d_name
.name
;
868 new_name
= dentry
->d_name
.name
;
/* If a name pointer referred to the inline buffer, swap it to the
 * OTHER dentry's inline buffer so each name stays self-contained. */
869 if (old_name
== target
->d_iname
)
870 old_name
= dentry
->d_iname
;
871 if (new_name
== dentry
->d_iname
)
872 new_name
= target
->d_iname
;
873 target
->d_name
.name
= new_name
;
874 dentry
->d_name
.name
= old_name
;
878 * We cannibalize "target" when moving dentry on top of it,
879 * because it's going to be thrown away anyway. We could be more
880 * polite about it, though.
882 * This forceful removal will result in ugly /proc output if
883 * somebody holds a file open that got deleted due to a rename.
884 * We could be nicer about the deleted file, and let it show
885 * up under the name it got deleted rather than the name that
888 * Careful with the hash switch. The hash switch depends on
889 * the fact that any list-entry can be a head of the list.
894 * d_move - move a dentry
895 * @dentry: entry to move
896 * @target: new dentry
898 * Update the dcache to reflect the move of a file name. Negative
899 * dcache entries should not be moved in this way.
902 void d_move(struct dentry
* dentry
, struct dentry
* target
)
906 if (!dentry
->d_inode
)
907 printk(KERN_WARNING
"VFS: moving negative dcache entry\n");
909 spin_lock(&dcache_lock
);
910 /* Move the dentry to the target hash queue */
911 list_del(&dentry
->d_hash
);
912 list_add(&dentry
->d_hash
, &target
->d_hash
);
914 /* Unhash the target: dput() will then get rid of it */
915 list_del(&target
->d_hash
);
916 INIT_LIST_HEAD(&target
->d_hash
);
/* Both dentries leave their parents' child lists before the swap. */
918 list_del(&dentry
->d_child
);
919 list_del(&target
->d_child
);
921 /* Switch the parents and the names.. */
922 switch_names(dentry
, target
);
923 do_switch(dentry
->d_parent
, target
->d_parent
);
924 do_switch(dentry
->d_name
.len
, target
->d_name
.len
);
925 do_switch(dentry
->d_name
.hash
, target
->d_name
.hash
);
927 /* And add them back to the (new) parent lists */
928 list_add(&target
->d_child
, &target
->d_parent
->d_subdirs
);
929 list_add(&dentry
->d_child
, &dentry
->d_parent
->d_subdirs
);
930 spin_unlock(&dcache_lock
);
934 * d_path - return the path of a dentry
935 * @dentry: dentry to report
936 * @vfsmnt: vfsmnt to which the dentry belongs
938 * @rootmnt: vfsmnt to which the root dentry belongs
939 * @buffer: buffer to return value in
940 * @buflen: buffer length
942 * Convert a dentry into an ASCII path name. If the entry has been deleted
943 * the string " (deleted)" is appended. Note that this is ambiguous. Returns
946 * "buflen" should be %PAGE_SIZE or more. Caller holds the dcache_lock.
/* The path is built backwards from the end of the buffer, walking
 * from the dentry up toward the root and crossing mount points. */
948 char * __d_path(struct dentry
*dentry
, struct vfsmount
*vfsmnt
,
949 struct dentry
*root
, struct vfsmount
*rootmnt
,
950 char *buffer
, int buflen
)
952 char * end
= buffer
+buflen
;
/* Unhashed non-root dentry == deleted file: append the marker. */
958 if (!IS_ROOT(dentry
) && list_empty(&dentry
->d_hash
)) {
961 memcpy(end
, " (deleted)", 10);
969 struct dentry
* parent
;
/* Stop once we reach the process's root (dentry AND mount match). */
971 if (dentry
== root
&& vfsmnt
== rootmnt
)
/* At a mount root: hop to the mountpoint in the parent mount. */
973 if (dentry
== vfsmnt
->mnt_root
|| IS_ROOT(dentry
)) {
975 if (vfsmnt
->mnt_parent
== vfsmnt
)
977 dentry
= vfsmnt
->mnt_mountpoint
;
978 vfsmnt
= vfsmnt
->mnt_parent
;
/* Prepend "/<name>" for this component and ascend to the parent. */
981 parent
= dentry
->d_parent
;
982 namelen
= dentry
->d_name
.len
;
983 buflen
-= namelen
+ 1;
987 memcpy(end
, dentry
->d_name
.name
, namelen
);
994 namelen
= dentry
->d_name
.len
;
997 retval
-= namelen
-1; /* hit the slash */
998 memcpy(retval
, dentry
->d_name
.name
, namelen
);
1004 * NOTE! The user-level library version returns a
1005 * character pointer. The kernel system call just
1006 * returns the length of the buffer filled (which
1007 * includes the ending '\0' character), or a negative
1008 * error value. So libc would do something like
1010 * char *getcwd(char * buf, size_t size)
1014 * retval = sys_getcwd(buf, size);
1021 asmlinkage
long sys_getcwd(char *buf
, unsigned long size
)
1024 struct vfsmount
*pwdmnt
, *rootmnt
;
1025 struct dentry
*pwd
, *root
;
/* Scratch page: __d_path() builds the path backwards into it. */
1026 char *page
= (char *) __get_free_page(GFP_USER
);
/* Snapshot cwd and root (with references) under the fs-struct lock. */
1031 read_lock(&current
->fs
->lock
);
1032 pwdmnt
= mntget(current
->fs
->pwdmnt
);
1033 pwd
= dget(current
->fs
->pwd
);
1034 rootmnt
= mntget(current
->fs
->rootmnt
);
1035 root
= dget(current
->fs
->root
);
1036 read_unlock(&current
->fs
->lock
);
1039 /* Has the current directory has been unlinked? */
1040 spin_lock(&dcache_lock
);
1041 if (pwd
->d_parent
== pwd
|| !list_empty(&pwd
->d_hash
)) {
1045 cwd
= __d_path(pwd
, pwdmnt
, root
, rootmnt
, page
, PAGE_SIZE
);
1046 spin_unlock(&dcache_lock
);
/* Length including the terminating NUL: distance from cwd to page end. */
1049 len
= PAGE_SIZE
+ page
- cwd
;
1052 if (copy_to_user(buf
, cwd
, len
))
1056 spin_unlock(&dcache_lock
);
1061 free_page((unsigned long) page
);
1066 * Test whether new_dentry is a subdirectory of old_dentry.
1068 * Trivially implemented using the dcache structure
1072 * is_subdir - is new dentry a subdirectory of old_dentry
1073 * @new_dentry: new dentry
1074 * @old_dentry: old dentry
1076 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
1077 * Returns 0 otherwise.
1080 int is_subdir(struct dentry
* new_dentry
, struct dentry
* old_dentry
)
/* Walk d_parent links upward until old_dentry or a root is reached. */
1086 if (new_dentry
!= old_dentry
) {
1087 struct dentry
* parent
= new_dentry
->d_parent
;
/* Self-parented dentry is a root: old_dentry was not an ancestor. */
1088 if (parent
== new_dentry
)
1090 new_dentry
= parent
;
/*
 * d_genocide - drop one reference from every hashed, positive dentry
 * in the subtree below root, using the same hand-rolled iterative
 * depth-first walk as have_submounts()/select_parent().
 */
1099 void d_genocide(struct dentry
*root
)
1101 struct dentry
*this_parent
= root
;
1102 struct list_head
*next
;
1104 spin_lock(&dcache_lock
);
1106 next
= this_parent
->d_subdirs
.next
;
1108 while (next
!= &this_parent
->d_subdirs
) {
1109 struct list_head
*tmp
= next
;
1110 struct dentry
*dentry
= list_entry(tmp
, struct dentry
, d_child
);
/* Skip unhashed or negative dentries. */
1112 if (d_unhashed(dentry
)||!dentry
->d_inode
)
1114 if (!list_empty(&dentry
->d_subdirs
)) {
1115 this_parent
= dentry
;
1118 atomic_dec(&dentry
->d_count
);
/* Ascend: drop the reference on the directory we just finished. */
1120 if (this_parent
!= root
) {
1121 next
= this_parent
->d_child
.next
;
1122 atomic_dec(&this_parent
->d_count
);
1123 this_parent
= this_parent
->d_parent
;
1126 spin_unlock(&dcache_lock
);
1130 * find_inode_number - check for dentry with name
1131 * @dir: directory to check
1132 * @name: Name to find.
1134 * Check whether a dentry already exists for the given name,
1135 * and return the inode number if it has an inode. Otherwise
1138 * This routine is used to post-process directory listings for
1139 * filesystems using synthetic inode numbers, and is necessary
1140 * to keep getcwd() working.
1143 ino_t
find_inode_number(struct dentry
*dir
, struct qstr
*name
)
1145 struct dentry
* dentry
;
1149 * Check for a fs-specific hash function. Note that we must
1150 * calculate the standard hash first, as the d_op->d_hash()
1151 * routine may choose to leave the hash value unchanged.
1153 name
->hash
= full_name_hash(name
->name
, name
->len
);
1154 if (dir
->d_op
&& dir
->d_op
->d_hash
)
/* A nonzero return from d_hash() means the name is not valid here. */
1156 if (dir
->d_op
->d_hash(dir
, name
) != 0)
1160 dentry
= d_lookup(dir
, name
);
/* Only positive dentries carry an inode number. */
1163 if (dentry
->d_inode
)
1164 ino
= dentry
->d_inode
->i_ino
;
1171 void __init
dcache_init(unsigned long mempages
)
1173 struct list_head
*d
;
1174 unsigned long order
;
1175 unsigned int nr_hash
;
1179 * A constructor could be added for stable state like the lists,
1180 * but it is probably not worth it because of the cache nature
1182 * If fragmentation is too bad then the SLAB_HWCACHE_ALIGN
1183 * flag could be removed here, to hint to the allocator that
1184 * it should not try to get multiple page regions.
1186 dentry_cache
= kmem_cache_create("dentry_cache",
1187 sizeof(struct dentry
),
1192 panic("Cannot create dentry cache");
1194 mempages
>>= (13 - PAGE_SHIFT
);
1195 mempages
*= sizeof(struct list_head
);
1196 for (order
= 0; ((1UL << order
) << PAGE_SHIFT
) < mempages
; order
++)
1202 nr_hash
= (1UL << order
) * PAGE_SIZE
/
1203 sizeof(struct list_head
);
1204 d_hash_mask
= (nr_hash
- 1);
1208 while ((tmp
>>= 1UL) != 0UL)
1211 dentry_hashtable
= (struct list_head
*)
1212 __get_free_pages(GFP_ATOMIC
, order
);
1213 } while (dentry_hashtable
== NULL
&& --order
>= 0);
1215 printk("Dentry-cache hash table entries: %d (order: %ld, %ld bytes)\n",
1216 nr_hash
, order
, (PAGE_SIZE
<< order
));
1218 if (!dentry_hashtable
)
1219 panic("Failed to allocate dcache hash table\n");
1221 d
= dentry_hashtable
;