/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/config.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/malloc.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/cache.h>

#include <asm/uaccess.h>

#define DCACHE_PARANOIA 1
/* #define DCACHE_DEBUG 1 */
spinlock_t dcache_lock = SPIN_LOCK_UNLOCKED;

/* Right now the dcache depends on the kernel lock */
#define check_lock()	if (!kernel_locked()) BUG()

static kmem_cache_t *dentry_cache;
/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask;
static unsigned int d_hash_shift;
static struct list_head *dentry_hashtable;
static LIST_HEAD(dentry_unused);
struct {
	int nr_dentry;
	int nr_unused;
	int age_limit;		/* age in seconds */
	int want_pages;		/* pages requested by system */
	int dummy[2];
} dentry_stat = {0, 0, 45, 0,};
/* no dcache_lock, please */
static inline void d_free(struct dentry *dentry)
{
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
	dentry_stat.nr_dentry--;
}
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 * Called with dcache_lock held, drops it.
 */
static inline void dentry_iput(struct dentry * dentry)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dcache_lock);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else
		spin_unlock(&dcache_lock);
}
/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/**
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 *
 * no dcache lock, please.
 */

void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
		return;

	/* dput on a free dentry? */
	if (!list_empty(&dentry->d_lru))
		BUG();
	/*
	 * AV: ->d_delete() is _NOT_ allowed to block now.
	 */
	if (dentry->d_op && dentry->d_op->d_delete) {
		if (dentry->d_op->d_delete(dentry))
			goto unhash_it;
	}
	/* Unreachable? Get rid of it */
	if (list_empty(&dentry->d_hash))
		goto kill_it;
	list_add(&dentry->d_lru, &dentry_unused);
	dentry_stat.nr_unused++;
	/*
	 * Update the timestamp
	 */
	dentry->d_reftime = jiffies;
	spin_unlock(&dcache_lock);
	return;

unhash_it:
	list_del_init(&dentry->d_hash);

kill_it: {
		struct dentry *parent;
		list_del(&dentry->d_child);
		/* drops the lock, at that point nobody can reach this dentry */
		dentry_iput(dentry);
		parent = dentry->d_parent;
		d_free(dentry);
		if (dentry == parent)
			return;
		dentry = parent;
		goto repeat;
	}
}
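/*
 * Usage sketch (hypothetical caller, not part of this file): every
 * counted reference must be balanced by exactly one dput().
 *
 *	struct dentry *de = d_lookup(dir, &name);
 *	if (de) {
 *		... use de ...
 *		dput(de);	// may free "de" and its unused parents
 *	}
 */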
/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */

int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dcache_lock);
	if (list_empty(&dentry->d_hash)) {
		spin_unlock(&dcache_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dcache_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dcache_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	if (atomic_read(&dentry->d_count) > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dcache_lock);
			return -EBUSY;
		}
	}

	list_del_init(&dentry->d_hash);
	spin_unlock(&dcache_lock);
	return 0;
}
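/*
 * Usage sketch (hypothetical caller, e.g. a network filesystem that
 * detects a stale name during revalidation):
 *
 *	if (d_invalidate(dentry) == -EBUSY)
 *		... subtree still reachable or busy, keep using it ...
 */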
/* This should be called _only_ with dcache_lock held */

static inline struct dentry * __dget_locked(struct dentry *dentry)
{
	atomic_inc(&dentry->d_count);
	if (atomic_read(&dentry->d_count) == 1) {
		dentry_stat.nr_unused--;
		list_del(&dentry->d_lru);
		INIT_LIST_HEAD(&dentry->d_lru);		/* make "list_empty()" work */
	}
	return dentry;
}

struct dentry * dget_locked(struct dentry *dentry)
{
	return __dget_locked(dentry);
}
/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias - acquire the reference to alias and
 * return it. Otherwise return NULL. Notice that if inode is a directory
 * there can be only one alias and it can be unhashed only if it has
 * no children.
 */

struct dentry * d_find_alias(struct inode *inode)
{
	struct list_head *head, *next, *tmp;
	struct dentry *alias;

	spin_lock(&dcache_lock);
	head = &inode->i_dentry;
	next = inode->i_dentry.next;
	while (next != head) {
		tmp = next;
		next = tmp->next;
		alias = list_entry(tmp, struct dentry, d_alias);
		if (!list_empty(&alias->d_hash)) {
			__dget_locked(alias);
			spin_unlock(&dcache_lock);
			return alias;
		}
	}
	spin_unlock(&dcache_lock);
	return NULL;
}
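/*
 * Usage sketch (hypothetical caller that wants to reuse an existing
 * dentry for an inode instead of allocating a new one).  The
 * reference taken here must be balanced with dput():
 *
 *	alias = d_find_alias(inode);
 *	if (alias) {
 *		... use alias ...
 *		dput(alias);
 *	}
 */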
/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct list_head *tmp, *head = &inode->i_dentry;
restart:
	spin_lock(&dcache_lock);
	tmp = head;
	while ((tmp = tmp->next) != head) {
		struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
		if (!atomic_read(&dentry->d_count)) {
			__dget_locked(dentry);
			spin_unlock(&dcache_lock);
			d_drop(dentry);
			dput(dentry);
			goto restart;
		}
	}
	spin_unlock(&dcache_lock);
}
/*
 * Throw away a dentry - free the inode, dput the parent.
 * This requires that the LRU list has already been
 * removed.
 * Called with dcache_lock, drops it and then regains.
 */
static inline void prune_one_dentry(struct dentry * dentry)
{
	struct dentry * parent;

	list_del_init(&dentry->d_hash);
	list_del(&dentry->d_child);
	dentry_iput(dentry);
	parent = dentry->d_parent;
	d_free(dentry);
	if (parent != dentry)
		dput(parent);
	spin_lock(&dcache_lock);
}
/**
 * prune_dcache - shrink the dcache
 * @count: number of entries to try and free
 *
 * Shrink the dcache. This is done when we need
 * more memory, or simply when we need to unmount
 * something (at which point we need to unuse
 * all dentries).
 *
 * This function may fail to free any resources if
 * all the dentries are in use.
 */

void prune_dcache(int count)
{
	spin_lock(&dcache_lock);
	for (;;) {
		struct dentry *dentry;
		struct list_head *tmp;

		tmp = dentry_unused.prev;

		if (tmp == &dentry_unused)
			break;
		dentry_stat.nr_unused--;
		list_del_init(tmp);
		dentry = list_entry(tmp, struct dentry, d_lru);

		/* Unused dentry with a count? */
		if (atomic_read(&dentry->d_count))
			BUG();

		prune_one_dentry(dentry);
		if (!--count)
			break;
	}
	spin_unlock(&dcache_lock);
}
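/*
 * Note on @count: a count of 0 never trips the "!--count" test above,
 * so prune_dcache(0) walks the whole list and frees every unused
 * dentry.  For example (sketch):
 *
 *	prune_dcache(0);				// unuse everything
 *	prune_dcache(dentry_stat.nr_unused / 2);	// free about half
 */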
/*
 * Shrink the dcache for the specified super block.
 * This allows us to unmount a device without disturbing
 * the dcache for the other devices.
 *
 * This implementation makes just two traversals of the
 * unused list. On the first pass we move the selected
 * dentries to the most recent end, and on the second
 * pass we free them. The second pass must restart after
 * each dput(), but since the target dentries are all at
 * the end, it's really just a single traversal.
 */

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This
 * is used to free the dcache before unmounting a file
 * system
 */

void shrink_dcache_sb(struct super_block * sb)
{
	struct list_head *tmp, *next;
	struct dentry *dentry;

	/*
	 * Pass one ... move the dentries for the specified
	 * superblock to the most recent end of the unused list.
	 */
	spin_lock(&dcache_lock);
	next = dentry_unused.next;
	while (next != &dentry_unused) {
		tmp = next;
		next = tmp->next;
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		list_del(tmp);
		list_add(tmp, &dentry_unused);
	}

	/*
	 * Pass two ... free the dentries for this superblock.
	 */
repeat:
	next = dentry_unused.next;
	while (next != &dentry_unused) {
		tmp = next;
		next = tmp->next;
		dentry = list_entry(tmp, struct dentry, d_lru);
		if (dentry->d_sb != sb)
			continue;
		if (atomic_read(&dentry->d_count))
			continue;
		dentry_stat.nr_unused--;
		list_del(tmp);
		INIT_LIST_HEAD(tmp);
		prune_one_dentry(dentry);
		goto repeat;
	}
	spin_unlock(&dcache_lock);
}
/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */

int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;

	spin_lock(&dcache_lock);
	if (d_mountpoint(parent))
		goto positive;
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry))
			goto positive;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_child.next;
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
	return 0; /* No mount points found in tree */
positive:
	spin_unlock(&dcache_lock);
	return 1;
}
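/*
 * Usage sketch (hypothetical caller, e.g. an automounter-style
 * filesystem deciding whether a subtree may be expired):
 *
 *	if (have_submounts(dentry))
 *		return -EBUSY;	// something is mounted below, keep it
 */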
/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent = parent;
	struct list_head *next;
	int found = 0;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;
		if (!atomic_read(&dentry->d_count)) {
			list_del(&dentry->d_lru);
			list_add(&dentry->d_lru, dentry_unused.prev);
			found++;
		}
		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
#ifdef DCACHE_DEBUG
			printk(KERN_DEBUG "select_parent: descending to %s/%s, found=%d\n",
				dentry->d_parent->d_name.name, dentry->d_name.name, found);
#endif
			goto repeat;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		next = this_parent->d_child.next;
		this_parent = this_parent->d_parent;
#ifdef DCACHE_DEBUG
		printk(KERN_DEBUG "select_parent: ascending to %s/%s, found=%d\n",
			this_parent->d_parent->d_name.name, this_parent->d_name.name, found);
#endif
		goto resume;
	}
	spin_unlock(&dcache_lock);
	return found;
}
/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */

void shrink_dcache_parent(struct dentry * parent)
{
	int found;

	while ((found = select_parent(parent)) != 0)
		prune_dcache(found);
}
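/*
 * For an in-tree caller see d_invalidate() above, which flushes
 * unused children before checking whether the dentry itself is still
 * busy.  Sketch:
 *
 *	shrink_dcache_parent(dentry);	// free unused descendants
 *	... children still cached now are pinned by real users ...
 */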
/*
 * This is called from kswapd when we think we need some
 * more memory, but aren't really sure how much. So we
 * carefully try to free a _bit_ of our dcache, but not
 * too much.
 *
 * Priority:
 *   0 - very urgent: shrink everything
 *  ...
 *   6 - base-level: try to shrink a bit.
 */
void shrink_dcache_memory(int priority, unsigned int gfp_mask)
{
	int count = 0;

	/*
	 * Nasty deadlock avoidance.
	 *
	 * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
	 * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->
	 * put_inode->ext2_discard_prealloc->ext2_free_blocks->lock_super->
	 * DEADLOCK.
	 *
	 * We should make sure we don't hold the superblock lock over
	 * block allocations, but for now:
	 */
	if (!(gfp_mask & __GFP_IO))
		return;

	if (priority)
		count = dentry_stat.nr_unused / priority;

	prune_dcache(count);
	kmem_cache_shrink(dentry_cache);
}
#define NAME_ALLOC_LEN(len)	((len+16) & ~15)
/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */

struct dentry * d_alloc(struct dentry * parent, const struct qstr *name)
{
	char * str;
	struct dentry *dentry;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		str = kmalloc(NAME_ALLOC_LEN(name->len), GFP_KERNEL);
		if (!str) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else
		str = dentry->d_iname;

	memcpy(str, name->name, name->len);
	str[name->len] = 0;

	atomic_set(&dentry->d_count, 1);
	dentry->d_flags = 0;
	dentry->d_inode = NULL;
	dentry->d_parent = NULL;
	dentry->d_sb = NULL;
	dentry->d_name.name = str;
	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_LIST_HEAD(&dentry->d_vfsmnt);
	INIT_LIST_HEAD(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);
	if (parent) {
		dentry->d_parent = dget(parent);
		dentry->d_sb = parent->d_sb;
		spin_lock(&dcache_lock);
		list_add(&dentry->d_child, &parent->d_subdirs);
		spin_unlock(&dcache_lock);
	} else
		INIT_LIST_HEAD(&dentry->d_child);

	dentry_stat.nr_dentry++;
	return dentry;
}
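/*
 * Usage sketch (hypothetical lookup path): the qstr is copied, so a
 * stack-local copy is fine.
 *
 *	struct qstr this = { name, len, full_name_hash(name, len) };
 *	struct dentry *de = d_alloc(dir, &this);
 *	if (!de)
 *		return -ENOMEM;
 */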
/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */

void d_instantiate(struct dentry *entry, struct inode * inode)
{
	spin_lock(&dcache_lock);
	if (inode)
		list_add(&entry->d_alias, &inode->i_dentry);
	entry->d_inode = inode;
	spin_unlock(&dcache_lock);
}
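/*
 * Usage sketch (hypothetical fs create method): the inode reference
 * obtained from the allocation below is handed over to the dcache.
 *
 *	inode = my_fs_new_inode(dir, mode);	// hypothetical helper
 *	if (!inode)
 *		return -ENOSPC;
 *	d_instantiate(dentry, inode);	// dcache now owns that i_count
 */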
/**
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated and returned. %NULL is returned if there is insufficient
 * memory or the inode passed is %NULL.
 */

struct dentry * d_alloc_root(struct inode * root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		res = d_alloc(NULL, &(const struct qstr) { "/", 1, 0 });
		if (res) {
			res->d_sb = root_inode->i_sb;
			res->d_parent = res;
			d_instantiate(res, root_inode);
		}
	}
	return res;
}
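/*
 * Usage sketch (hypothetical read_super): on failure the caller still
 * owns the inode reference and must iput() it itself.
 *
 *	sb->s_root = d_alloc_root(root_inode);
 *	if (!sb->s_root) {
 *		iput(root_inode);
 *		... fail the mount ...
 *	}
 */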
static inline struct list_head * d_hash(struct dentry * parent, unsigned long hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	hash = hash ^ (hash >> D_HASHBITS) ^ (hash >> D_HASHBITS*2);
	return dentry_hashtable + (hash & D_HASHMASK);
}
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 *
 * Searches the children of the parent dentry for the name in question. If
 * the dentry is found its reference count is incremented and the dentry
 * is returned. The caller must use dput() to free the entry when it has
 * finished using it. %NULL is returned on failure.
 */

struct dentry * d_lookup(struct dentry * parent, struct qstr * name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct list_head *head = d_hash(parent,hash);
	struct list_head *tmp;

	spin_lock(&dcache_lock);
	tmp = head->next;
	for (;;) {
		struct dentry * dentry = list_entry(tmp, struct dentry, d_hash);
		if (tmp == head)
			break;
		tmp = tmp->next;
		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;
		if (parent->d_op && parent->d_op->d_compare) {
			if (parent->d_op->d_compare(parent, &dentry->d_name, name))
				continue;
		} else {
			if (dentry->d_name.len != len)
				continue;
			if (memcmp(dentry->d_name.name, str, len))
				continue;
		}
		__dget_locked(dentry);
		spin_unlock(&dcache_lock);
		return dentry;
	}
	spin_unlock(&dcache_lock);
	return NULL;
}
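/*
 * Usage sketch: the caller computes the hash first (or lets the
 * parent's d_hash op adjust it) and balances the reference with
 * dput() when done.
 *
 *	struct qstr q = { "foo", 3, 0 };	// hypothetical name
 *	q.hash = full_name_hash(q.name, q.len);
 *	dentry = d_lookup(dir, &q);
 *	if (dentry) {
 *		... use dentry ...
 *		dput(dentry);
 *	}
 */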
/**
 * d_validate - verify dentry provided from insecure source
 * @dentry: The dentry alleged to be valid
 * @dparent: The parent dentry
 * @hash: Hash of the dentry
 * @len: Length of the name
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 *
 * NOTE: This function does _not_ dereference the pointers before we have
 * validated them. We can test the pointer values, but we
 * must not actually use them until we have found a valid
 * copy of the pointer in kernel space..
 */
int d_validate(struct dentry *dentry, struct dentry *dparent,
	       unsigned int hash, unsigned int len)
{
	struct list_head *base, *lhp;
	int valid = 1;

	spin_lock(&dcache_lock);
	if (dentry != dparent) {
		base = d_hash(dparent, hash);
		lhp = base;
		while ((lhp = lhp->next) != base) {
			if (dentry == list_entry(lhp, struct dentry, d_hash)) {
				__dget_locked(dentry);
				goto out;
			}
		}
	} else {
		/*
		 * Special case: local mount points don't live in
		 * the hashes, so we search the super blocks.
		 */
		struct super_block *sb = sb_entry(super_blocks.next);

		for (; sb != sb_entry(&super_blocks);
		     sb = sb_entry(sb->s_list.next)) {
			if (!sb->s_dev)
				continue;
			if (sb->s_root == dentry) {
				__dget_locked(dentry);
				goto out;
			}
		}
	}
	valid = 0;
out:
	spin_unlock(&dcache_lock);
	return valid;
}
/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */

void d_delete(struct dentry * dentry)
{
	/*
	 * Are we the only user?
	 */
	spin_lock(&dcache_lock);
	if (atomic_read(&dentry->d_count) == 1) {
		/* dentry_iput() drops dcache_lock for us */
		dentry_iput(dentry);
		return;
	}
	spin_unlock(&dcache_lock);

	/*
	 * If not, just drop the dentry and let dput
	 * pick up the tab..
	 */
	d_drop(dentry);
}
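/*
 * Usage sketch (hypothetical fs unlink method): remove the name on
 * disk first, then tell the dcache.
 *
 *	err = my_fs_remove_entry(dir, dentry);	// hypothetical helper
 *	if (!err)
 *		d_delete(dentry);	// goes negative, or unhashes if busy
 */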
/**
 * d_rehash - add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */

void d_rehash(struct dentry * entry)
{
	struct list_head *list = d_hash(entry->d_parent, entry->d_name.hash);
	spin_lock(&dcache_lock);
	list_add(&entry->d_hash, list);
	spin_unlock(&dcache_lock);
}
#define do_switch(x,y) do { \
	__typeof__ (x) __tmp = x; \
	x = y; y = __tmp; } while (0)

/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch.
 *
 * Note that we have to be a lot more careful about getting the hash
 * switched - we have to switch the hash value properly even if it
 * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on..
 */
static inline void switch_names(struct dentry * dentry, struct dentry * target)
{
	const unsigned char *old_name, *new_name;

	check_lock();
	memcpy(dentry->d_iname, target->d_iname, DNAME_INLINE_LEN);
	old_name = target->d_name.name;
	new_name = dentry->d_name.name;
	if (old_name == target->d_iname)
		old_name = dentry->d_iname;
	if (new_name == dentry->d_iname)
		new_name = target->d_iname;
	target->d_name.name = new_name;
	dentry->d_name.name = old_name;
}
/*
 * We cannibalize "target" when moving dentry on top of it,
 * because it's going to be thrown away anyway. We could be more
 * polite about it, though.
 *
 * This forceful removal will result in ugly /proc output if
 * somebody holds a file open that got deleted due to a rename.
 * We could be nicer about the deleted file, and let it show
 * up under the name it got deleted rather than the name that
 * deleted it.
 *
 * Careful with the hash switch. The hash switch depends on
 * the fact that any list-entry can be a head of the list.
 * Think about it.
 */

/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way.
 */

void d_move(struct dentry * dentry, struct dentry * target)
{
	check_lock();

	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	spin_lock(&dcache_lock);
	/* Move the dentry to the target hash queue */
	list_del(&dentry->d_hash);
	list_add(&dentry->d_hash, &target->d_hash);

	/* Unhash the target: dput() will then get rid of it */
	list_del(&target->d_hash);
	INIT_LIST_HEAD(&target->d_hash);

	list_del(&dentry->d_child);
	list_del(&target->d_child);

	/* Switch the parents and the names.. */
	switch_names(dentry, target);
	do_switch(dentry->d_parent, target->d_parent);
	do_switch(dentry->d_name.len, target->d_name.len);
	do_switch(dentry->d_name.hash, target->d_name.hash);

	/* And add them back to the (new) parent lists */
	list_add(&target->d_child, &target->d_parent->d_subdirs);
	list_add(&dentry->d_child, &dentry->d_parent->d_subdirs);
	spin_unlock(&dcache_lock);
}
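/*
 * Usage sketch (hypothetical fs rename method): update the dcache
 * only after the on-disk rename has succeeded.
 *
 *	err = my_fs_rename_entry(old_dir, old_dentry, new_dir, new_dentry);
 *	if (!err)
 *		d_move(old_dentry, new_dentry);	// target is cannibalized
 */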
/**
 * d_path - return the path of a dentry
 * @dentry: dentry to report
 * @vfsmnt: vfsmnt to which the dentry belongs
 * @root: root dentry
 * @rootmnt: vfsmnt to which the root dentry belongs
 * @buffer: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous. Returns
 * the buffer.
 *
 * "buflen" should be %PAGE_SIZE or more. Caller holds the dcache_lock.
 */
char * __d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
		struct dentry *root, struct vfsmount *rootmnt,
		char *buffer, int buflen)
{
	char * end = buffer+buflen;
	char * retval;
	int namelen;

	*--end = '\0';
	buflen--;
	if (!IS_ROOT(dentry) && list_empty(&dentry->d_hash)) {
		buflen -= 10;
		end -= 10;
		memcpy(end, " (deleted)", 10);
	}

	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	for (;;) {
		struct dentry * parent;

		if (dentry == root && vfsmnt == rootmnt)
			break;
		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
			if (vfsmnt->mnt_parent == vfsmnt)
				goto global_root;
			dentry = vfsmnt->mnt_mountpoint;
			vfsmnt = vfsmnt->mnt_parent;
			continue;
		}
		parent = dentry->d_parent;
		namelen = dentry->d_name.len;
		buflen -= namelen + 1;
		if (buflen < 0)
			break;
		end -= namelen;
		memcpy(end, dentry->d_name.name, namelen);
		*--end = '/';
		retval = end;
		dentry = parent;
	}
	return retval;
global_root:
	namelen = dentry->d_name.len;
	buflen -= namelen;
	if (buflen >= 0) {
		retval -= namelen-1;	/* hit the slash */
		memcpy(retval, dentry->d_name.name, namelen);
	}
	return retval;
}
/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char * buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
asmlinkage long sys_getcwd(char *buf, unsigned long size)
{
	int error;
	struct vfsmount *pwdmnt, *rootmnt;
	struct dentry *pwd, *root;
	char *page = (char *) __get_free_page(GFP_USER);

	if (!page)
		return -ENOMEM;

	read_lock(&current->fs->lock);
	pwdmnt = mntget(current->fs->pwdmnt);
	pwd = dget(current->fs->pwd);
	rootmnt = mntget(current->fs->rootmnt);
	root = dget(current->fs->root);
	read_unlock(&current->fs->lock);

	error = -ENOENT;
	/* Has the current directory been unlinked? */
	spin_lock(&dcache_lock);
	if (pwd->d_parent == pwd || !list_empty(&pwd->d_hash)) {
		unsigned long len;
		char * cwd;

		cwd = __d_path(pwd, pwdmnt, root, rootmnt, page, PAGE_SIZE);
		spin_unlock(&dcache_lock);

		error = -ERANGE;
		len = PAGE_SIZE + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else
		spin_unlock(&dcache_lock);
	dput(pwd);
	mntput(pwdmnt);
	dput(root);
	mntput(rootmnt);
	free_page((unsigned long) page);
	return error;
}
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
 * Returns 0 otherwise.
 */

int is_subdir(struct dentry * new_dentry, struct dentry * old_dentry)
{
	int result;

	result = 0;
	for (;;) {
		if (new_dentry != old_dentry) {
			struct dentry * parent = new_dentry->d_parent;
			if (parent == new_dentry)
				break;
			new_dentry = parent;
			continue;
		}
		result = 1;
		break;
	}
	return result;
}
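/*
 * Usage sketch: a rename implementation can use this to refuse
 * moving a directory into its own subtree.
 *
 *	if (is_subdir(new_dentry, old_dentry))
 *		return -EINVAL;		// would create a loop
 */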
void d_genocide(struct dentry *root)
{
	struct dentry *this_parent = root;
	struct list_head *next;

	spin_lock(&dcache_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;
		if (d_unhashed(dentry)||!dentry->d_inode)
			continue;
		if (!list_empty(&dentry->d_subdirs)) {
			this_parent = dentry;
			goto repeat;
		}
		atomic_dec(&dentry->d_count);
	}
	if (this_parent != root) {
		next = this_parent->d_child.next;
		atomic_dec(&this_parent->d_count);
		this_parent = this_parent->d_parent;
		goto resume;
	}
	spin_unlock(&dcache_lock);
}
/**
 * find_inode_number - check for dentry with name
 * @dir: directory to check
 * @name: Name to find.
 *
 * Check whether a dentry already exists for the given name,
 * and return the inode number if it has an inode. Otherwise
 * 0 is returned.
 *
 * This routine is used to post-process directory listings for
 * filesystems using synthetic inode numbers, and is necessary
 * to keep getcwd() working.
 */

ino_t find_inode_number(struct dentry *dir, struct qstr *name)
{
	struct dentry * dentry;
	ino_t ino = 0;

	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_op && dir->d_op->d_hash)
	{
		if (dir->d_op->d_hash(dir, name) != 0)
			goto out;
	}

	dentry = d_lookup(dir, name);
	if (dentry)
	{
		if (dentry->d_inode)
			ino = dentry->d_inode->i_ino;
		dput(dentry);
	}
out:
	return ino;
}
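/*
 * Usage sketch (hypothetical readdir of a filesystem with synthetic
 * inode numbers): prefer a number already visible in the dcache so
 * getcwd() keeps working.
 *
 *	ino = find_inode_number(dir, &qname);
 *	if (!ino)
 *		ino = synthetic_ino;	// hypothetical fallback
 */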
static void __init dcache_init(unsigned long mempages)
{
	struct list_head *d;
	unsigned long order;
	unsigned int nr_hash;
	int i;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 * If fragmentation is too bad then the SLAB_HWCACHE_ALIGN
	 * flag could be removed here, to hint to the allocator that
	 * it should not try to get multiple page regions.
	 */
	dentry_cache = kmem_cache_create("dentry_cache",
					 sizeof(struct dentry),
					 0,
					 SLAB_HWCACHE_ALIGN,
					 NULL, NULL);
	if (!dentry_cache)
		panic("Cannot create dentry cache");

#if PAGE_SHIFT < 13
	mempages >>= (13 - PAGE_SHIFT);
#endif
	mempages *= sizeof(struct list_head);
	for (order = 0; ((1UL << order) << PAGE_SHIFT) < mempages; order++)
		;

	do {
		unsigned long tmp;

		nr_hash = (1UL << order) * PAGE_SIZE /
			sizeof(struct list_head);
		d_hash_mask = (nr_hash - 1);

		tmp = nr_hash;
		d_hash_shift = 0;
		while ((tmp >>= 1UL) != 0UL)
			d_hash_shift++;

		dentry_hashtable = (struct list_head *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (dentry_hashtable == NULL && --order >= 0);

	printk("Dentry-cache hash table entries: %d (order: %ld, %ld bytes)\n",
			nr_hash, order, (PAGE_SIZE << order));

	if (!dentry_hashtable)
		panic("Failed to allocate dcache hash table\n");

	d = dentry_hashtable;
	i = nr_hash;
	do {
		INIT_LIST_HEAD(d);
		d++;
		i--;
	} while (i);
}
/* SLAB cache for __getname() consumers */
kmem_cache_t *names_cachep;

/* SLAB cache for file structures */
kmem_cache_t *filp_cachep;

/* SLAB cache for dquot structures */
kmem_cache_t *dquot_cachep;

/* SLAB cache for buffer_head structures */
kmem_cache_t *bh_cachep;

void __init vfs_caches_init(unsigned long mempages)
{
	bh_cachep = kmem_cache_create("buffer_head",
			sizeof(struct buffer_head), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if(!bh_cachep)
		panic("Cannot create buffer head SLAB cache");

	names_cachep = kmem_cache_create("names_cache",
			PATH_MAX + 1, 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!names_cachep)
		panic("Cannot create names SLAB cache");

	filp_cachep = kmem_cache_create("filp",
			sizeof(struct file), 0,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if(!filp_cachep)
		panic("Cannot create filp SLAB cache");

#if defined (CONFIG_QUOTA)
	dquot_cachep = kmem_cache_create("dquot",
			sizeof(struct dquot), sizeof(unsigned long) * 4,
			SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!dquot_cachep)
		panic("Cannot create dquot SLAB cache");
#endif

	dcache_init(mempages);
}