Merge with 2.4.0-test3-pre7.
[linux-2.6/linux-mips.git] / fs / dcache.c
blob3de8547ef2844736363c6de4bf0aa26623c57292
/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */
17 #include <linux/string.h>
18 #include <linux/mm.h>
19 #include <linux/fs.h>
20 #include <linux/malloc.h>
21 #include <linux/slab.h>
22 #include <linux/init.h>
23 #include <linux/smp_lock.h>
24 #include <linux/cache.h>
26 #include <asm/uaccess.h>
28 #define DCACHE_PARANOIA 1
29 /* #define DCACHE_DEBUG 1 */
31 spinlock_t dcache_lock = SPIN_LOCK_UNLOCKED;
33 /* Right now the dcache depends on the kernel lock */
34 #define check_lock() if (!kernel_locked()) BUG()
36 static kmem_cache_t *dentry_cache;
39 * This is the single most critical data structure when it comes
40 * to the dcache: the hashtable for lookups. Somebody should try
41 * to make this good - I've just made it work.
43 * This hash-function tries to avoid losing too many bits of hash
44 * information, yet avoid using a prime hash-size or similar.
46 #define D_HASHBITS d_hash_shift
47 #define D_HASHMASK d_hash_mask
49 static unsigned int d_hash_mask;
50 static unsigned int d_hash_shift;
51 static struct list_head *dentry_hashtable;
52 static LIST_HEAD(dentry_unused);
54 struct {
55 int nr_dentry;
56 int nr_unused;
57 int age_limit; /* age in seconds */
58 int want_pages; /* pages requested by system */
59 int dummy[2];
60 } dentry_stat = {0, 0, 45, 0,};
62 /* no dcache_lock, please */
63 static inline void d_free(struct dentry *dentry)
65 if (dentry->d_op && dentry->d_op->d_release)
66 dentry->d_op->d_release(dentry);
67 if (dname_external(dentry))
68 kfree(dentry->d_name.name);
69 kmem_cache_free(dentry_cache, dentry);
70 dentry_stat.nr_dentry--;
74 * Release the dentry's inode, using the fileystem
75 * d_iput() operation if defined.
76 * Called with dcache_lock held, drops it.
78 static inline void dentry_iput(struct dentry * dentry)
80 struct inode *inode = dentry->d_inode;
81 if (inode) {
82 dentry->d_inode = NULL;
83 list_del(&dentry->d_alias);
84 INIT_LIST_HEAD(&dentry->d_alias);
85 spin_unlock(&dcache_lock);
86 if (dentry->d_op && dentry->d_op->d_iput)
87 dentry->d_op->d_iput(dentry, inode);
88 else
89 iput(inode);
90 } else
91 spin_unlock(&dcache_lock);
94 /*
95 * This is dput
97 * This is complicated by the fact that we do not want to put
98 * dentries that are no longer on any hash chain on the unused
99 * list: we'd much rather just get rid of them immediately.
101 * However, that implies that we have to traverse the dentry
102 * tree upwards to the parents which might _also_ now be
103 * scheduled for deletion (it may have been only waiting for
104 * its last child to go away).
106 * This tail recursion is done by hand as we don't want to depend
107 * on the compiler to always get this right (gcc generally doesn't).
108 * Real recursion would eat up our stack space.
112 * dput - release a dentry
113 * @dentry: dentry to release
115 * Release a dentry. This will drop the usage count and if appropriate
116 * call the dentry unlink method as well as removing it from the queues and
117 * releasing its resources. If the parent dentries were scheduled for release
118 * they too may now get deleted.
120 * no dcache lock, please.
123 void dput(struct dentry *dentry)
125 if (!dentry)
126 return;
128 repeat:
129 if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
130 return;
132 /* dput on a free dentry? */
133 if (!list_empty(&dentry->d_lru))
134 BUG();
136 * AV: ->d_delete() is _NOT_ allowed to block now.
138 if (dentry->d_op && dentry->d_op->d_delete) {
139 if (dentry->d_op->d_delete(dentry))
140 goto unhash_it;
142 /* Unreachable? Get rid of it */
143 if (list_empty(&dentry->d_hash))
144 goto kill_it;
145 list_add(&dentry->d_lru, &dentry_unused);
146 dentry_stat.nr_unused++;
148 * Update the timestamp
150 dentry->d_reftime = jiffies;
151 spin_unlock(&dcache_lock);
152 return;
154 unhash_it:
155 list_del(&dentry->d_hash);
157 kill_it: {
158 struct dentry *parent;
159 list_del(&dentry->d_child);
160 /* drops the lock, at that point nobody can reach this dentry */
161 dentry_iput(dentry);
162 parent = dentry->d_parent;
163 d_free(dentry);
164 if (dentry == parent)
165 return;
166 dentry = parent;
167 goto repeat;
172 * d_invalidate - invalidate a dentry
173 * @dentry: dentry to invalidate
175 * Try to invalidate the dentry if it turns out to be
176 * possible. If there are other dentries that can be
177 * reached through this one we can't delete it and we
178 * return -EBUSY. On success we return 0.
180 * no dcache lock.
183 int d_invalidate(struct dentry * dentry)
186 * If it's already been dropped, return OK.
188 spin_lock(&dcache_lock);
189 if (list_empty(&dentry->d_hash)) {
190 spin_unlock(&dcache_lock);
191 return 0;
194 * Check whether to do a partial shrink_dcache
195 * to get rid of unused child entries.
197 if (!list_empty(&dentry->d_subdirs)) {
198 spin_unlock(&dcache_lock);
199 shrink_dcache_parent(dentry);
200 spin_lock(&dcache_lock);
204 * Somebody else still using it?
206 * If it's a directory, we can't drop it
207 * for fear of somebody re-populating it
208 * with children (even though dropping it
209 * would make it unreachable from the root,
210 * we might still populate it if it was a
211 * working directory or similar).
213 if (atomic_read(&dentry->d_count) > 1) {
214 if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
215 spin_unlock(&dcache_lock);
216 return -EBUSY;
220 list_del(&dentry->d_hash);
221 INIT_LIST_HEAD(&dentry->d_hash);
222 spin_unlock(&dcache_lock);
223 return 0;
226 /* This should be called _only_ with dcache_lock held */
228 static inline struct dentry * __dget_locked(struct dentry *dentry)
230 atomic_inc(&dentry->d_count);
231 if (atomic_read(&dentry->d_count) == 1) {
232 dentry_stat.nr_unused--;
233 list_del(&dentry->d_lru);
234 INIT_LIST_HEAD(&dentry->d_lru); /* make "list_empty()" work */
236 return dentry;
/* Out-of-line wrapper for callers that already hold dcache_lock. */
struct dentry * dget_locked(struct dentry *dentry)
{
	return __dget_locked(dentry);
}
245 * d_find_alias - grab a hashed alias of inode
246 * @inode: inode in question
248 * If inode has a hashed alias - acquire the reference to alias and
249 * return it. Otherwise return NULL. Notice that if inode is a directory
250 * there can be only one alias and it can be unhashed only if it has
251 * no children.
254 struct dentry * d_find_alias(struct inode *inode)
256 struct list_head *head, *next, *tmp;
257 struct dentry *alias;
259 spin_lock(&dcache_lock);
260 head = &inode->i_dentry;
261 next = inode->i_dentry.next;
262 while (next != head) {
263 tmp = next;
264 next = tmp->next;
265 alias = list_entry(tmp, struct dentry, d_alias);
266 if (!list_empty(&alias->d_hash)) {
267 __dget_locked(alias);
268 spin_unlock(&dcache_lock);
269 return alias;
272 spin_unlock(&dcache_lock);
273 return NULL;
277 * Try to kill dentries associated with this inode.
278 * WARNING: you must own a reference to inode.
280 void d_prune_aliases(struct inode *inode)
282 struct list_head *tmp, *head = &inode->i_dentry;
283 restart:
284 spin_lock(&dcache_lock);
285 tmp = head;
286 while ((tmp = tmp->next) != head) {
287 struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
288 if (!atomic_read(&dentry->d_count)) {
289 __dget_locked(dentry);
290 spin_unlock(&dcache_lock);
291 d_drop(dentry);
292 dput(dentry);
293 goto restart;
296 spin_unlock(&dcache_lock);
300 * Throw away a dentry - free the inode, dput the parent.
301 * This requires that the LRU list has already been
302 * removed.
303 * Called with dcache_lock, drops it and then regains.
305 static inline void prune_one_dentry(struct dentry * dentry)
307 struct dentry * parent;
309 list_del(&dentry->d_hash);
310 list_del(&dentry->d_child);
311 dentry_iput(dentry);
312 parent = dentry->d_parent;
313 d_free(dentry);
314 if (parent != dentry)
315 dput(parent);
316 spin_lock(&dcache_lock);
320 * prune_dcache - shrink the dcache
321 * @count: number of entries to try and free
323 * Shrink the dcache. This is done when we need
324 * more memory, or simply when we need to unmount
325 * something (at which point we need to unuse
326 * all dentries).
328 * This function may fail to free any resources if
329 * all the dentries are in use.
332 void prune_dcache(int count)
334 spin_lock(&dcache_lock);
335 for (;;) {
336 struct dentry *dentry;
337 struct list_head *tmp;
339 tmp = dentry_unused.prev;
341 if (tmp == &dentry_unused)
342 break;
343 dentry_stat.nr_unused--;
344 list_del(tmp);
345 INIT_LIST_HEAD(tmp);
346 dentry = list_entry(tmp, struct dentry, d_lru);
348 /* Unused dentry with a count? */
349 if (atomic_read(&dentry->d_count))
350 BUG();
352 prune_one_dentry(dentry);
353 if (!--count)
354 break;
356 spin_unlock(&dcache_lock);
360 * Shrink the dcache for the specified super block.
361 * This allows us to unmount a device without disturbing
362 * the dcache for the other devices.
364 * This implementation makes just two traversals of the
365 * unused list. On the first pass we move the selected
366 * dentries to the most recent end, and on the second
367 * pass we free them. The second pass must restart after
368 * each dput(), but since the target dentries are all at
369 * the end, it's really just a single traversal.
373 * shrink_dcache_sb - shrink dcache for a superblock
374 * @sb: superblock
376 * Shrink the dcache for the specified super block. This
377 * is used to free the dcache before unmounting a file
378 * system
381 void shrink_dcache_sb(struct super_block * sb)
383 struct list_head *tmp, *next;
384 struct dentry *dentry;
387 * Pass one ... move the dentries for the specified
388 * superblock to the most recent end of the unused list.
390 spin_lock(&dcache_lock);
391 next = dentry_unused.next;
392 while (next != &dentry_unused) {
393 tmp = next;
394 next = tmp->next;
395 dentry = list_entry(tmp, struct dentry, d_lru);
396 if (dentry->d_sb != sb)
397 continue;
398 list_del(tmp);
399 list_add(tmp, &dentry_unused);
403 * Pass two ... free the dentries for this superblock.
405 repeat:
406 next = dentry_unused.next;
407 while (next != &dentry_unused) {
408 tmp = next;
409 next = tmp->next;
410 dentry = list_entry(tmp, struct dentry, d_lru);
411 if (dentry->d_sb != sb)
412 continue;
413 if (atomic_read(&dentry->d_count))
414 continue;
415 dentry_stat.nr_unused--;
416 list_del(tmp);
417 INIT_LIST_HEAD(tmp);
418 prune_one_dentry(dentry);
419 goto repeat;
421 spin_unlock(&dcache_lock);
425 * Search for at least 1 mount point in the dentry's subdirs.
426 * We descend to the next level whenever the d_subdirs
427 * list is non-empty and continue searching.
431 * have_submounts - check for mounts over a dentry
432 * @parent: dentry to check.
434 * Return true if the parent or its subdirectories contain
435 * a mount point
438 int have_submounts(struct dentry *parent)
440 struct dentry *this_parent = parent;
441 struct list_head *next;
443 spin_lock(&dcache_lock);
444 if (d_mountpoint(parent))
445 goto positive;
446 repeat:
447 next = this_parent->d_subdirs.next;
448 resume:
449 while (next != &this_parent->d_subdirs) {
450 struct list_head *tmp = next;
451 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
452 next = tmp->next;
453 /* Have we found a mount point ? */
454 if (d_mountpoint(dentry))
455 goto positive;
456 if (!list_empty(&dentry->d_subdirs)) {
457 this_parent = dentry;
458 goto repeat;
462 * All done at this level ... ascend and resume the search.
464 if (this_parent != parent) {
465 next = this_parent->d_child.next;
466 this_parent = this_parent->d_parent;
467 goto resume;
469 spin_unlock(&dcache_lock);
470 return 0; /* No mount points found in tree */
471 positive:
472 spin_unlock(&dcache_lock);
473 return 1;
477 * Search the dentry child list for the specified parent,
478 * and move any unused dentries to the end of the unused
479 * list for prune_dcache(). We descend to the next level
480 * whenever the d_subdirs list is non-empty and continue
481 * searching.
483 static int select_parent(struct dentry * parent)
485 struct dentry *this_parent = parent;
486 struct list_head *next;
487 int found = 0;
489 spin_lock(&dcache_lock);
490 repeat:
491 next = this_parent->d_subdirs.next;
492 resume:
493 while (next != &this_parent->d_subdirs) {
494 struct list_head *tmp = next;
495 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
496 next = tmp->next;
497 if (!atomic_read(&dentry->d_count)) {
498 list_del(&dentry->d_lru);
499 list_add(&dentry->d_lru, dentry_unused.prev);
500 found++;
503 * Descend a level if the d_subdirs list is non-empty.
505 if (!list_empty(&dentry->d_subdirs)) {
506 this_parent = dentry;
507 #ifdef DCACHE_DEBUG
508 printk(KERN_DEBUG "select_parent: descending to %s/%s, found=%d\n",
509 dentry->d_parent->d_name.name, dentry->d_name.name, found);
510 #endif
511 goto repeat;
515 * All done at this level ... ascend and resume the search.
517 if (this_parent != parent) {
518 next = this_parent->d_child.next;
519 this_parent = this_parent->d_parent;
520 #ifdef DCACHE_DEBUG
521 printk(KERN_DEBUG "select_parent: ascending to %s/%s, found=%d\n",
522 this_parent->d_parent->d_name.name, this_parent->d_name.name, found);
523 #endif
524 goto resume;
526 spin_unlock(&dcache_lock);
527 return found;
/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry * parent)
{
	int found;

	/* repeat until no unused children remain below @parent */
	while ((found = select_parent(parent)) != 0)
		prune_dcache(found);
}
546 * This is called from kswapd when we think we need some
547 * more memory, but aren't really sure how much. So we
548 * carefully try to free a _bit_ of our dcache, but not
549 * too much.
551 * Priority:
552 * 0 - very urgent: shrink everything
553 * ...
554 * 6 - base-level: try to shrink a bit.
556 int shrink_dcache_memory(int priority, unsigned int gfp_mask)
558 int count = 0;
559 if (priority)
560 count = dentry_stat.nr_unused >> (priority >> 2);
561 prune_dcache(count);
562 /* FIXME: kmem_cache_shrink here should tell us
563 the number of pages freed, and it should
564 work in a __GFP_DMA/__GFP_HIGHMEM behaviour
565 to free only the interesting pages in
566 function of the needs of the current allocation. */
567 kmem_cache_shrink(dentry_cache);
569 return 0;
572 #define NAME_ALLOC_LEN(len) ((len+16) & ~15)
575 * d_alloc - allocate a dcache entry
576 * @parent: parent of entry to allocate
577 * @name: qstr of the name
579 * Allocates a dentry. It returns %NULL if there is insufficient memory
580 * available. On a success the dentry is returned. The name passed in is
581 * copied and the copy passed in may be reused after this call.
584 struct dentry * d_alloc(struct dentry * parent, const struct qstr *name)
586 char * str;
587 struct dentry *dentry;
589 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
590 if (!dentry)
591 return NULL;
593 if (name->len > DNAME_INLINE_LEN-1) {
594 str = kmalloc(NAME_ALLOC_LEN(name->len), GFP_KERNEL);
595 if (!str) {
596 kmem_cache_free(dentry_cache, dentry);
597 return NULL;
599 } else
600 str = dentry->d_iname;
602 memcpy(str, name->name, name->len);
603 str[name->len] = 0;
605 atomic_set(&dentry->d_count, 1);
606 dentry->d_flags = 0;
607 dentry->d_inode = NULL;
608 dentry->d_parent = NULL;
609 dentry->d_sb = NULL;
610 dentry->d_name.name = str;
611 dentry->d_name.len = name->len;
612 dentry->d_name.hash = name->hash;
613 dentry->d_op = NULL;
614 dentry->d_fsdata = NULL;
615 INIT_LIST_HEAD(&dentry->d_vfsmnt);
616 INIT_LIST_HEAD(&dentry->d_hash);
617 INIT_LIST_HEAD(&dentry->d_lru);
618 INIT_LIST_HEAD(&dentry->d_subdirs);
619 INIT_LIST_HEAD(&dentry->d_alias);
620 if (parent) {
621 dentry->d_parent = dget(parent);
622 dentry->d_sb = parent->d_sb;
623 spin_lock(&dcache_lock);
624 list_add(&dentry->d_child, &parent->d_subdirs);
625 spin_unlock(&dcache_lock);
626 } else
627 INIT_LIST_HEAD(&dentry->d_child);
629 dentry_stat.nr_dentry++;
630 return dentry;
634 * d_instantiate - fill in inode information for a dentry
635 * @entry: dentry to complete
636 * @inode: inode to attach to this dentry
638 * Fill in inode information in the entry.
640 * This turns negative dentries into productive full members
641 * of society.
643 * NOTE! This assumes that the inode count has been incremented
644 * (or otherwise set) by the caller to indicate that it is now
645 * in use by the dcache.
648 void d_instantiate(struct dentry *entry, struct inode * inode)
650 spin_lock(&dcache_lock);
651 if (inode)
652 list_add(&entry->d_alias, &inode->i_dentry);
653 entry->d_inode = inode;
654 spin_unlock(&dcache_lock);
658 * d_alloc_root - allocate root dentry
659 * @root_inode: inode to allocate the root for
661 * Allocate a root ("/") dentry for the inode given. The inode is
662 * instantiated and returned. %NULL is returned if there is insufficient
663 * memory or the inode passed is %NULL.
666 struct dentry * d_alloc_root(struct inode * root_inode)
668 struct dentry *res = NULL;
670 if (root_inode) {
671 res = d_alloc(NULL, &(const struct qstr) { "/", 1, 0 });
672 if (res) {
673 res->d_sb = root_inode->i_sb;
674 res->d_parent = res;
675 d_instantiate(res, root_inode);
678 return res;
681 static inline struct list_head * d_hash(struct dentry * parent, unsigned long hash)
683 hash += (unsigned long) parent / L1_CACHE_BYTES;
684 hash = hash ^ (hash >> D_HASHBITS) ^ (hash >> D_HASHBITS*2);
685 return dentry_hashtable + (hash & D_HASHMASK);
689 * d_lookup - search for a dentry
690 * @parent: parent dentry
691 * @name: qstr of name we wish to find
693 * Searches the children of the parent dentry for the name in question. If
694 * the dentry is found its reference count is incremented and the dentry
695 * is returned. The caller must use d_put to free the entry when it has
696 * finished using it. %NULL is returned on failure.
699 struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
701 unsigned int len = name->len;
702 unsigned int hash = name->hash;
703 const unsigned char *str = name->name;
704 struct list_head *head = d_hash(parent,hash);
705 struct list_head *tmp;
707 spin_lock(&dcache_lock);
708 tmp = head->next;
709 for (;;) {
710 struct dentry * dentry = list_entry(tmp, struct dentry, d_hash);
711 if (tmp == head)
712 break;
713 tmp = tmp->next;
714 if (dentry->d_name.hash != hash)
715 continue;
716 if (dentry->d_parent != parent)
717 continue;
718 if (parent->d_op && parent->d_op->d_compare) {
719 if (parent->d_op->d_compare(parent, &dentry->d_name, name))
720 continue;
721 } else {
722 if (dentry->d_name.len != len)
723 continue;
724 if (memcmp(dentry->d_name.name, str, len))
725 continue;
727 __dget_locked(dentry);
728 spin_unlock(&dcache_lock);
729 return dentry;
731 spin_unlock(&dcache_lock);
732 return NULL;
736 * d_validate - verify dentry provided from insecure source
737 * @dentry: The dentry alleged to be valid
738 * @dparent: The parent dentry
739 * @hash: Hash of the dentry
740 * @len: Length of the name
742 * An insecure source has sent us a dentry, here we verify it and dget() it.
743 * This is used by ncpfs in its readdir implementation.
744 * Zero is returned in the dentry is invalid.
746 * NOTE: This function does _not_ dereference the pointers before we have
747 * validated them. We can test the pointer values, but we
748 * must not actually use them until we have found a valid
749 * copy of the pointer in kernel space..
752 int d_validate(struct dentry *dentry, struct dentry *dparent,
753 unsigned int hash, unsigned int len)
755 struct list_head *base, *lhp;
756 int valid = 1;
758 spin_lock(&dcache_lock);
759 if (dentry != dparent) {
760 base = d_hash(dparent, hash);
761 lhp = base;
762 while ((lhp = lhp->next) != base) {
763 if (dentry == list_entry(lhp, struct dentry, d_hash)) {
764 __dget_locked(dentry);
765 goto out;
768 } else {
770 * Special case: local mount points don't live in
771 * the hashes, so we search the super blocks.
773 struct super_block *sb = sb_entry(super_blocks.next);
775 for (; sb != sb_entry(&super_blocks);
776 sb = sb_entry(sb->s_list.next)) {
777 if (!sb->s_dev)
778 continue;
779 if (sb->s_root == dentry) {
780 __dget_locked(dentry);
781 goto out;
785 valid = 0;
786 out:
787 spin_unlock(&dcache_lock);
788 return valid;
792 * When a file is deleted, we have two options:
793 * - turn this dentry into a negative dentry
794 * - unhash this dentry and free it.
796 * Usually, we want to just turn this into
797 * a negative dentry, but if anybody else is
798 * currently using the dentry or the inode
799 * we can't do that and we fall back on removing
800 * it from the hash queues and waiting for
801 * it to be deleted later when it has no users
805 * d_delete - delete a dentry
806 * @dentry: The dentry to delete
808 * Turn the dentry into a negative dentry if possible, otherwise
809 * remove it from the hash queues so it can be deleted later
812 void d_delete(struct dentry * dentry)
815 * Are we the only user?
817 spin_lock(&dcache_lock);
818 if (atomic_read(&dentry->d_count) == 1) {
819 dentry_iput(dentry);
820 return;
822 spin_unlock(&dcache_lock);
825 * If not, just drop the dentry and let dput
826 * pick up the tab..
828 d_drop(dentry);
832 * d_rehash - add an entry back to the hash
833 * @entry: dentry to add to the hash
835 * Adds a dentry to the hash according to its name.
838 void d_rehash(struct dentry * entry)
840 struct list_head *list = d_hash(entry->d_parent, entry->d_name.hash);
841 spin_lock(&dcache_lock);
842 list_add(&entry->d_hash, list);
843 spin_unlock(&dcache_lock);
846 #define do_switch(x,y) do { \
847 __typeof__ (x) __tmp = x; \
848 x = y; y = __tmp; } while (0)
851 * When switching names, the actual string doesn't strictly have to
852 * be preserved in the target - because we're dropping the target
853 * anyway. As such, we can just do a simple memcpy() to copy over
854 * the new name before we switch.
856 * Note that we have to be a lot more careful about getting the hash
857 * switched - we have to switch the hash value properly even if it
858 * then no longer matches the actual (corrupted) string of the target.
859 * The hash value has to match the hash queue that the dentry is on..
861 static inline void switch_names(struct dentry * dentry, struct dentry * target)
863 const unsigned char *old_name, *new_name;
865 check_lock();
866 memcpy(dentry->d_iname, target->d_iname, DNAME_INLINE_LEN);
867 old_name = target->d_name.name;
868 new_name = dentry->d_name.name;
869 if (old_name == target->d_iname)
870 old_name = dentry->d_iname;
871 if (new_name == dentry->d_iname)
872 new_name = target->d_iname;
873 target->d_name.name = new_name;
874 dentry->d_name.name = old_name;
878 * We cannibalize "target" when moving dentry on top of it,
879 * because it's going to be thrown away anyway. We could be more
880 * polite about it, though.
882 * This forceful removal will result in ugly /proc output if
883 * somebody holds a file open that got deleted due to a rename.
884 * We could be nicer about the deleted file, and let it show
885 * up under the name it got deleted rather than the name that
886 * deleted it.
888 * Careful with the hash switch. The hash switch depends on
889 * the fact that any list-entry can be a head of the list.
890 * Think about it.
894 * d_move - move a dentry
895 * @dentry: entry to move
896 * @target: new dentry
898 * Update the dcache to reflect the move of a file name. Negative
899 * dcache entries should not be moved in this way.
902 void d_move(struct dentry * dentry, struct dentry * target)
904 check_lock();
906 if (!dentry->d_inode)
907 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
909 spin_lock(&dcache_lock);
910 /* Move the dentry to the target hash queue */
911 list_del(&dentry->d_hash);
912 list_add(&dentry->d_hash, &target->d_hash);
914 /* Unhash the target: dput() will then get rid of it */
915 list_del(&target->d_hash);
916 INIT_LIST_HEAD(&target->d_hash);
918 list_del(&dentry->d_child);
919 list_del(&target->d_child);
921 /* Switch the parents and the names.. */
922 switch_names(dentry, target);
923 do_switch(dentry->d_parent, target->d_parent);
924 do_switch(dentry->d_name.len, target->d_name.len);
925 do_switch(dentry->d_name.hash, target->d_name.hash);
927 /* And add them back to the (new) parent lists */
928 list_add(&target->d_child, &target->d_parent->d_subdirs);
929 list_add(&dentry->d_child, &dentry->d_parent->d_subdirs);
930 spin_unlock(&dcache_lock);
934 * d_path - return the path of a dentry
935 * @dentry: dentry to report
936 * @buffer: buffer to return value in
937 * @buflen: buffer length
939 * Convert a dentry into an ASCII path name. If the entry has been deleted
940 * the string " (deleted)" is appended. Note that this is ambiguous. Returns
941 * the buffer.
943 * "buflen" should be %PAGE_SIZE or more. Caller holds the dcache_lock.
945 char * __d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
946 struct dentry *root, struct vfsmount *rootmnt,
947 char *buffer, int buflen)
949 char * end = buffer+buflen;
950 char * retval;
951 int namelen;
953 *--end = '\0';
954 buflen--;
955 if (!IS_ROOT(dentry) && list_empty(&dentry->d_hash)) {
956 buflen -= 10;
957 end -= 10;
958 memcpy(end, " (deleted)", 10);
961 /* Get '/' right */
962 retval = end-1;
963 *retval = '/';
965 for (;;) {
966 struct dentry * parent;
968 if (dentry == root && vfsmnt == rootmnt)
969 break;
970 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
971 /* Global root? */
972 if (vfsmnt->mnt_parent == vfsmnt)
973 goto global_root;
974 dentry = vfsmnt->mnt_mountpoint;
975 vfsmnt = vfsmnt->mnt_parent;
976 continue;
978 parent = dentry->d_parent;
979 namelen = dentry->d_name.len;
980 buflen -= namelen + 1;
981 if (buflen < 0)
982 break;
983 end -= namelen;
984 memcpy(end, dentry->d_name.name, namelen);
985 *--end = '/';
986 retval = end;
987 dentry = parent;
989 return retval;
990 global_root:
991 namelen = dentry->d_name.len;
992 buflen -= namelen;
993 if (buflen >= 0) {
994 retval -= namelen-1; /* hit the slash */
995 memcpy(retval, dentry->d_name.name, namelen);
997 return retval;
1001 * NOTE! The user-level library version returns a
1002 * character pointer. The kernel system call just
1003 * returns the length of the buffer filled (which
1004 * includes the ending '\0' character), or a negative
1005 * error value. So libc would do something like
1007 * char *getcwd(char * buf, size_t size)
1009 * int retval;
1011 * retval = sys_getcwd(buf, size);
1012 * if (retval >= 0)
1013 * return buf;
1014 * errno = -retval;
1015 * return NULL;
1018 asmlinkage long sys_getcwd(char *buf, unsigned long size)
1020 int error;
1021 struct vfsmount *pwdmnt, *rootmnt;
1022 struct dentry *pwd, *root;
1023 char *page = (char *) __get_free_page(GFP_USER);
1025 if (!page)
1026 return -ENOMEM;
1028 read_lock(&current->fs->lock);
1029 pwdmnt = mntget(current->fs->pwdmnt);
1030 pwd = dget(current->fs->pwd);
1031 rootmnt = mntget(current->fs->rootmnt);
1032 root = dget(current->fs->root);
1033 read_unlock(&current->fs->lock);
1035 error = -ENOENT;
1036 /* Has the current directory has been unlinked? */
1037 spin_lock(&dcache_lock);
1038 if (pwd->d_parent == pwd || !list_empty(&pwd->d_hash)) {
1039 unsigned long len;
1040 char * cwd;
1042 cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE);
1043 spin_unlock(&dcache_lock);
1045 error = -ERANGE;
1046 len = PAGE_SIZE + page - cwd;
1047 if (len <= size) {
1048 error = len;
1049 if (copy_to_user(buf, cwd, len))
1050 error = -EFAULT;
1052 } else
1053 spin_unlock(&dcache_lock);
1054 dput(pwd);
1055 mntput(pwdmnt);
1056 dput(root);
1057 mntput(rootmnt);
1058 free_page((unsigned long) page);
1059 return error;
1063 * Test whether new_dentry is a subdirectory of old_dentry.
1065 * Trivially implemented using the dcache structure
1069 * is_subdir - is new dentry a subdirectory of old_dentry
1070 * @new_dentry: new dentry
1071 * @old_dentry: old dentry
1073 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
1074 * Returns 0 otherwise.
1077 int is_subdir(struct dentry * new_dentry, struct dentry * old_dentry)
1079 int result;
1081 result = 0;
1082 for (;;) {
1083 if (new_dentry != old_dentry) {
1084 struct dentry * parent = new_dentry->d_parent;
1085 if (parent == new_dentry)
1086 break;
1087 new_dentry = parent;
1088 continue;
1090 result = 1;
1091 break;
1093 return result;
1096 void d_genocide(struct dentry *root)
1098 struct dentry *this_parent = root;
1099 struct list_head *next;
1101 spin_lock(&dcache_lock);
1102 repeat:
1103 next = this_parent->d_subdirs.next;
1104 resume:
1105 while (next != &this_parent->d_subdirs) {
1106 struct list_head *tmp = next;
1107 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1108 next = tmp->next;
1109 if (d_unhashed(dentry)||!dentry->d_inode)
1110 continue;
1111 if (!list_empty(&dentry->d_subdirs)) {
1112 this_parent = dentry;
1113 goto repeat;
1115 atomic_dec(&dentry->d_count);
1117 if (this_parent != root) {
1118 next = this_parent->d_child.next;
1119 atomic_dec(&this_parent->d_count);
1120 this_parent = this_parent->d_parent;
1121 goto resume;
1123 spin_unlock(&dcache_lock);
1127 * find_inode_number - check for dentry with name
1128 * @dir: directory to check
1129 * @name: Name to find.
1131 * Check whether a dentry already exists for the given name,
1132 * and return the inode number if it has an inode. Otherwise
1133 * 0 is returned.
1135 * This routine is used to post-process directory listings for
1136 * filesystems using synthetic inode numbers, and is necessary
1137 * to keep getcwd() working.
1140 ino_t find_inode_number(struct dentry *dir, struct qstr *name)
1142 struct dentry * dentry;
1143 ino_t ino = 0;
1146 * Check for a fs-specific hash function. Note that we must
1147 * calculate the standard hash first, as the d_op->d_hash()
1148 * routine may choose to leave the hash value unchanged.
1150 name->hash = full_name_hash(name->name, name->len);
1151 if (dir->d_op && dir->d_op->d_hash)
1153 if (dir->d_op->d_hash(dir, name) != 0)
1154 goto out;
1157 dentry = d_lookup(dir, name);
1158 if (dentry)
1160 if (dentry->d_inode)
1161 ino = dentry->d_inode->i_ino;
1162 dput(dentry);
1164 out:
1165 return ino;
1168 void __init dcache_init(unsigned long mempages)
1170 struct list_head *d;
1171 unsigned long order;
1172 unsigned int nr_hash;
1173 int i;
1176 * A constructor could be added for stable state like the lists,
1177 * but it is probably not worth it because of the cache nature
1178 * of the dcache.
1179 * If fragmentation is too bad then the SLAB_HWCACHE_ALIGN
1180 * flag could be removed here, to hint to the allocator that
1181 * it should not try to get multiple page regions.
1183 dentry_cache = kmem_cache_create("dentry_cache",
1184 sizeof(struct dentry),
1186 SLAB_HWCACHE_ALIGN,
1187 NULL, NULL);
1188 if (!dentry_cache)
1189 panic("Cannot create dentry cache");
1191 mempages >>= (13 - PAGE_SHIFT);
1192 mempages *= sizeof(struct list_head);
1193 for (order = 0; ((1UL << order) << PAGE_SHIFT) < mempages; order++)
1196 do {
1197 unsigned long tmp;
1199 nr_hash = (1UL << order) * PAGE_SIZE /
1200 sizeof(struct list_head);
1201 d_hash_mask = (nr_hash - 1);
1203 tmp = nr_hash;
1204 d_hash_shift = 0;
1205 while ((tmp >>= 1UL) != 0UL)
1206 d_hash_shift++;
1208 dentry_hashtable = (struct list_head *)
1209 __get_free_pages(GFP_ATOMIC, order);
1210 } while (dentry_hashtable == NULL && --order >= 0);
1212 printk("Dentry-cache hash table entries: %d (order: %ld, %ld bytes)\n",
1213 nr_hash, order, (PAGE_SIZE << order));
1215 if (!dentry_hashtable)
1216 panic("Failed to allocate dcache hash table\n");
1218 d = dentry_hashtable;
1219 i = nr_hash;
1220 do {
1221 INIT_LIST_HEAD(d);
1222 d++;
1223 i--;
1224 } while (i);