Import 2.4.0-test2pre6
[davej-history.git] / fs / inode.c
/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 */
#include <linux/config.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/cache.h>
/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 *
 * Famous last words.
 */
/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */

#define INODE_PARANOIA 1
/* #define INODE_DEBUG 1 */
/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define I_HASHBITS	i_hash_shift
#define I_HASHMASK	i_hash_mask

static unsigned int i_hash_mask;
static unsigned int i_hash_shift;
/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, i_count > 0, i_nlink > 0
 *  "dirty"  - as "in_use" but also dirty
 *  "unused" - valid inode, i_count = 0
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */
static LIST_HEAD(inode_in_use);
static LIST_HEAD(inode_unused);
static struct list_head *inode_hashtable;
static LIST_HEAD(anon_hash_chain); /* for inodes with NULL i_sb */
/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;
/*
 * Statistics gathering..
 */
struct {
	int nr_inodes;
	int nr_unused;
	int dummy[5];
} inodes_stat = {0, 0,};

static kmem_cache_t * inode_cachep;

#define alloc_inode() \
	((struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL))
#define destroy_inode(inode) kmem_cache_free(inode_cachep, (inode))
/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
{
	struct inode * inode = (struct inode *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
	{
		memset(inode, 0, sizeof(*inode));
		init_waitqueue_head(&inode->i_wait);
		INIT_LIST_HEAD(&inode->i_hash);
		INIT_LIST_HEAD(&inode->i_data.pages);
		INIT_LIST_HEAD(&inode->i_dentry);
		sema_init(&inode->i_sem, 1);
		sema_init(&inode->i_zombie, 1);
		spin_lock_init(&inode->i_data.i_shared_lock);
	}
}
/*
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but
 * move it onto the dirty list only if it is hashed.
 * If it was not hashed, it will never be added to
 * the dirty list even if it is later hashed, as it
 * will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_
 * you start marking them dirty..
 */

/**
 *	__mark_inode_dirty - internal function
 *	@inode: inode to mark
 *
 *	Mark an inode as dirty. Callers should use mark_inode_dirty.
 */

void __mark_inode_dirty(struct inode *inode)
{
	struct super_block * sb = inode->i_sb;

	if (sb) {
		spin_lock(&inode_lock);
		if (!(inode->i_state & I_DIRTY)) {
			inode->i_state |= I_DIRTY;
			/* Only add valid (ie hashed) inodes to the dirty list */
			if (!list_empty(&inode->i_hash)) {
				list_del(&inode->i_list);
				list_add(&inode->i_list, &sb->s_dirty);
			}
		}
		spin_unlock(&inode_lock);
	}
}
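/*
 * Illustrative sketch (not part of the original file): per the rule
 * above, a filesystem creating a fresh inode hashes it before the
 * first mark_inode_dirty(), so that the inode can reach sb->s_dirty:
 *
 *	inode->i_ino = ino;
 *	insert_inode_hash(inode);	-- hash first
 *	mark_inode_dirty(inode);	-- only then mark dirty
 */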
static void __wait_on_inode(struct inode * inode)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&inode->i_wait, &wait);
repeat:
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (inode->i_state & I_LOCK) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&inode->i_wait, &wait);
	current->state = TASK_RUNNING;
}

static inline void wait_on_inode(struct inode *inode)
{
	if (inode->i_state & I_LOCK)
		__wait_on_inode(inode);
}
static inline void write_inode(struct inode *inode)
{
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->write_inode)
		inode->i_sb->s_op->write_inode(inode);
}

static inline void __iget(struct inode * inode)
{
	if (atomic_read(&inode->i_count)) {
		atomic_inc(&inode->i_count);
		return;
	}
	atomic_inc(&inode->i_count);
	if (!(inode->i_state & I_DIRTY)) {
		list_del(&inode->i_list);
		list_add(&inode->i_list, &inode_in_use);
	}
	inodes_stat.nr_unused--;
}
static inline void sync_one(struct inode *inode)
{
	if (inode->i_state & I_LOCK) {
		__iget(inode);
		spin_unlock(&inode_lock);
		__wait_on_inode(inode);
		iput(inode);
		spin_lock(&inode_lock);
	} else {
		list_del(&inode->i_list);
		list_add(&inode->i_list, atomic_read(&inode->i_count)
					 ? &inode_in_use
					 : &inode_unused);
		/* Set I_LOCK, reset I_DIRTY */
		inode->i_state ^= I_DIRTY | I_LOCK;
		spin_unlock(&inode_lock);

		write_inode(inode);

		spin_lock(&inode_lock);
		inode->i_state &= ~I_LOCK;
		wake_up(&inode->i_wait);
	}
}

static inline void sync_list(struct list_head *head)
{
	struct list_head * tmp;

	while ((tmp = head->prev) != head)
		sync_one(list_entry(tmp, struct inode, i_list));
}
/**
 *	sync_inodes - write dirty inodes for a device
 *	@dev: device to sync the inodes from.
 *
 *	sync_inodes goes through each super block's dirty list, writes
 *	the inodes out, and puts them back on the normal list.
 */

void sync_inodes(kdev_t dev)
{
	struct super_block * sb = sb_entry(super_blocks.next);

	/*
	 * Search the super_blocks array for the device(s) to sync.
	 */
	spin_lock(&inode_lock);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
		if (!sb->s_dev)
			continue;
		if (dev && sb->s_dev != dev)
			continue;

		sync_list(&sb->s_dirty);

		if (dev)
			break;
	}
	spin_unlock(&inode_lock);
}
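/*
 * Illustrative sketch (not part of the original file): a dev of 0
 * means "all devices", so sync(2)-style callers do:
 *
 *	sync_inodes(0);			-- every superblock
 *	sync_inodes(inode->i_dev);	-- one device only
 */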
/*
 * Called with the spinlock already held..
 */
static void sync_all_inodes(void)
{
	struct super_block * sb = sb_entry(super_blocks.next);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
		if (!sb->s_dev)
			continue;
		sync_list(&sb->s_dirty);
	}
}
/**
 *	write_inode_now - write an inode to disk
 *	@inode: inode to write to disk
 *
 *	This function commits an inode to disk immediately if it is
 *	dirty. This is primarily needed by knfsd.
 */

void write_inode_now(struct inode *inode)
{
	struct super_block * sb = inode->i_sb;

	if (sb) {
		spin_lock(&inode_lock);
		while (inode->i_state & I_DIRTY)
			sync_one(inode);
		spin_unlock(&inode_lock);
	} else
		printk("write_inode_now: no super block\n");
}
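/*
 * Illustrative sketch (not part of the original file): knfsd-style
 * code that must have an attribute change committed before replying:
 *
 *	inode->i_mtime = CURRENT_TIME;
 *	mark_inode_dirty(inode);
 *	write_inode_now(inode);		-- loops until I_DIRTY clears
 */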
/**
 *	clear_inode - clear an inode
 *	@inode: inode to clear
 *
 *	This is called by the filesystem to tell us
 *	that the inode is no longer useful. We just
 *	terminate it with extreme prejudice.
 */

void clear_inode(struct inode *inode)
{
	if (inode->i_data.nrpages)
		BUG();
	if (!(inode->i_state & I_FREEING))
		BUG();
	if (inode->i_state & I_CLEAR)
		BUG();
	wait_on_inode(inode);
	if (IS_QUOTAINIT(inode))
		DQUOT_DROP(inode);
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->clear_inode)
		inode->i_sb->s_op->clear_inode(inode);
	if (inode->i_bdev) {
		bdput(inode->i_bdev);
		inode->i_bdev = NULL;
	}
	inode->i_state = I_CLEAR;
}
/*
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head * head)
{
	struct list_head * inode_entry;
	struct inode * inode;

	while ((inode_entry = head->next) != head)
	{
		list_del(inode_entry);

		inode = list_entry(inode_entry, struct inode, i_list);
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
		destroy_inode(inode);
	}
}
/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose)
{
	struct list_head *next;
	int busy = 0, count = 0;

	next = head->next;
	for (;;) {
		struct list_head * tmp = next;
		struct inode * inode;

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_list);
		if (inode->i_sb != sb)
			continue;
		if (!atomic_read(&inode->i_count)) {
			list_del(&inode->i_hash);
			INIT_LIST_HEAD(&inode->i_hash);
			list_del(&inode->i_list);
			list_add(&inode->i_list, dispose);
			inode->i_state |= I_FREEING;
			count++;
			continue;
		}
		busy = 1;
	}
	/* only unused inodes may be cached with i_count zero */
	inodes_stat.nr_unused -= count;
	return busy;
}
/*
 * This is a two-stage process. First we collect all
 * offending inodes onto the throw-away list, and in
 * the second stage we actually dispose of them. This
 * is because we don't want to sleep while messing
 * with the global lists..
 */

/**
 *	invalidate_inodes - discard the inodes on a device
 *	@sb: superblock
 *
 *	Discard all of the inodes for a given superblock. If the discard
 *	fails because there are busy inodes then a non-zero value is returned.
 *	If the discard is successful all the inodes have been discarded.
 */

int invalidate_inodes(struct super_block * sb)
{
	int busy;
	LIST_HEAD(throw_away);

	spin_lock(&inode_lock);
	busy = invalidate_list(&inode_in_use, sb, &throw_away);
	busy |= invalidate_list(&inode_unused, sb, &throw_away);
	busy |= invalidate_list(&sb->s_dirty, sb, &throw_away);
	spin_unlock(&inode_lock);

	dispose_list(&throw_away);

	return busy;
}
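/*
 * Illustrative sketch (not part of the original file): callers tearing
 * down a superblock treat a non-zero return as "busy inodes remain":
 *
 *	if (invalidate_inodes(sb))
 *		printk("VFS: busy inodes, cannot discard them all\n");
 */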
/*
 * This is called with the inode lock held. It searches
 * the in-use list for freeable inodes, which are moved to a
 * temporary list and then placed on the unused list by
 * dispose_list.
 *
 * We don't expect to have to call this very often.
 *
 * N.B. The spinlock is released during the call to
 *      dispose_list.
 */
#define CAN_UNUSE(inode) \
	(((inode)->i_state | (inode)->i_data.nrpages) == 0)
#define INODE(entry)	(list_entry(entry, struct inode, i_list))
void prune_icache(int goal)
{
	LIST_HEAD(list);
	struct list_head *entry, *freeable = &list;
	int count = 0;
	struct inode * inode;

	spin_lock(&inode_lock);
	/* go simple and safe syncing everything before starting */
	sync_all_inodes();

	entry = inode_unused.prev;
	while (entry != &inode_unused)
	{
		struct list_head *tmp = entry;

		entry = entry->prev;
		inode = INODE(tmp);
		if (inode->i_state & (I_FREEING|I_CLEAR))
			BUG();
		if (!CAN_UNUSE(inode))
			continue;
		if (atomic_read(&inode->i_count))
			BUG();
		list_del(tmp);
		list_del(&inode->i_hash);
		INIT_LIST_HEAD(&inode->i_hash);
		list_add(tmp, freeable);
		inode->i_state |= I_FREEING;
		count++;
		if (!--goal)
			break;
	}
	inodes_stat.nr_unused -= count;
	spin_unlock(&inode_lock);

	dispose_list(freeable);
}
int shrink_icache_memory(int priority, int gfp_mask)
{
	int count = 0;

	if (priority)
		count = inodes_stat.nr_unused / priority;
	prune_icache(count);
	/* FIXME: kmem_cache_shrink here should tell us
	   the number of pages freed, and it should
	   honour __GFP_DMA/__GFP_HIGHMEM, freeing only
	   the interesting pages according to the needs
	   of the current allocation. */
	kmem_cache_shrink(inode_cachep);

	return 0;
}
/*
 * Called with the inode lock held.
 * NOTE: we are not increasing the inode-refcount, you must call __iget()
 * by hand after calling find_inode now! This simplifies iunique and won't
 * add any additional branch in the common code.
 */
static struct inode * find_inode(struct super_block * sb, unsigned long ino, struct list_head *head, find_inode_t find_actor, void *opaque)
{
	struct list_head *tmp;
	struct inode * inode;

	tmp = head;
	for (;;) {
		tmp = tmp->next;
		inode = NULL;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_hash);
		if (inode->i_sb != sb)
			continue;
		if (inode->i_ino != ino)
			continue;
		if (find_actor && !find_actor(inode, ino, opaque))
			continue;
		break;
	}
	return inode;
}
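/*
 * Illustrative sketch (not part of the original file): a caller takes
 * the reference itself while still holding inode_lock, exactly as
 * iget4() does below:
 *
 *	spin_lock(&inode_lock);
 *	inode = find_inode(sb, ino, head, NULL, NULL);
 *	if (inode)
 *		__iget(inode);		-- refcount taken by hand
 *	spin_unlock(&inode_lock);
 */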
/*
 * This just initializes the inode fields
 * to known values before returning the inode..
 *
 * i_sb, i_ino, i_count, i_state and the lists have
 * been initialized elsewhere..
 */
static void clean_inode(struct inode *inode)
{
	static struct address_space_operations empty_aops = {};
	static struct inode_operations empty_iops = {};
	static struct file_operations empty_fops = {};
	memset(&inode->u, 0, sizeof(inode->u));
	inode->i_sock = 0;
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	inode->i_nlink = 1;
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_generation = 0;
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_data.a_ops = &empty_aops;
	inode->i_data.host = (void*)inode;
	inode->i_mapping = &inode->i_data;
}
/**
 *	get_empty_inode - obtain an inode
 *
 *	This is called by things like the networking layer
 *	etc that want to get an inode without any inode
 *	number, or filesystems that allocate new inodes with
 *	no pre-existing information.
 *
 *	On success the inode pointer is returned; on failure a %NULL
 *	pointer is returned. The returned inode is not on any superblock
 *	lists.
 */

struct inode * get_empty_inode(void)
{
	static unsigned long last_ino = 0;
	struct inode * inode;

	inode = alloc_inode();
	if (inode)
	{
		spin_lock(&inode_lock);
		list_add(&inode->i_list, &inode_in_use);
		inode->i_sb = NULL;
		inode->i_dev = 0;
		inode->i_ino = ++last_ino;
		inode->i_flags = 0;
		atomic_set(&inode->i_count, 1);
		inode->i_state = 0;
		spin_unlock(&inode_lock);
		clean_inode(inode);
	}
	return inode;
}
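/*
 * Illustrative sketch (not part of the original file): grabbing an
 * anonymous in-core inode, e.g. for a socket-like object:
 *
 *	struct inode *inode = get_empty_inode();
 *	if (!inode)
 *		return -ENOMEM;
 *	inode->i_mode = S_IFSOCK;	-- hypothetical caller setup
 */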
/*
 * This is called without the inode lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *	-- rmk@arm.uk.linux.org
 */
static struct inode * get_new_inode(struct super_block *sb, unsigned long ino, struct list_head *head, find_inode_t find_actor, void *opaque)
{
	struct inode * inode;

	inode = alloc_inode();
	if (inode) {
		struct inode * old;

		spin_lock(&inode_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, ino, head, find_actor, opaque);
		if (!old) {
			list_add(&inode->i_list, &inode_in_use);
			list_add(&inode->i_hash, head);
			inode->i_sb = sb;
			inode->i_dev = sb->s_dev;
			inode->i_ino = ino;
			inode->i_flags = 0;
			atomic_set(&inode->i_count, 1);
			inode->i_state = I_LOCK;
			spin_unlock(&inode_lock);

			clean_inode(inode);
			sb->s_op->read_inode(inode);

			/*
			 * This is special! We do not need the spinlock
			 * when clearing I_LOCK, because we're guaranteed
			 * that nobody else tries to do anything about the
			 * state of the inode when it is locked, as we
			 * just created it (so there can be no old holders
			 * that haven't tested I_LOCK).
			 */
			inode->i_state &= ~I_LOCK;
			wake_up(&inode->i_wait);

			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		__iget(old);
		spin_unlock(&inode_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
static inline unsigned long hash(struct super_block *sb, unsigned long i_ino)
{
	unsigned long tmp = i_ino | ((unsigned long) sb / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> I_HASHBITS) + (tmp >> I_HASHBITS*2);
	return tmp & I_HASHMASK;
}

/* Yeah, I know about quadratic hash. Maybe, later. */
/**
 *	iunique - get a unique inode number
 *	@sb: superblock
 *	@max_reserved: highest reserved inode number
 *
 *	Obtain an inode number that is unique on the system for a given
 *	superblock. This is used by file systems that have no natural
 *	permanent inode numbering system. An inode number is returned that
 *	is higher than the reserved limit but unique.
 *
 *	BUGS:
 *	With a large number of inodes live on the file system this function
 *	currently becomes quite slow.
 */

ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	static ino_t counter = 0;
	struct inode *inode;
	struct list_head * head;
	ino_t res;
	spin_lock(&inode_lock);
retry:
	if (counter > max_reserved) {
		head = inode_hashtable + hash(sb,counter);
		inode = find_inode(sb, res = counter++, head, NULL, NULL);
		if (!inode) {
			spin_unlock(&inode_lock);
			return res;
		}
	} else {
		counter = max_reserved + 1;
	}
	goto retry;
}
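/*
 * Illustrative sketch (not part of the original file): a filesystem
 * without stable on-disk inode numbers picks one at creation time:
 *
 *	inode->i_ino = iunique(sb, EXAMPLE_RESERVED_INOS);
 *
 * EXAMPLE_RESERVED_INOS is a hypothetical per-fs constant that keeps
 * fixed numbers (the root inode etc.) out of the dynamic range.
 */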
struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode_lock);
	if (!(inode->i_state & I_FREEING))
		__iget(inode);
	else
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	spin_unlock(&inode_lock);
	if (inode)
		wait_on_inode(inode);
	return inode;
}
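/*
 * Illustrative sketch (not part of the original file): code holding a
 * bare inode pointer must check for the freeing race:
 *
 *	inode = igrab(inode);
 *	if (!inode)
 *		return;			-- it was already being freed
 *	...use inode...
 *	iput(inode);
 */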
struct inode *iget4(struct super_block *sb, unsigned long ino, find_inode_t find_actor, void *opaque)
{
	struct list_head * head = inode_hashtable + hash(sb,ino);
	struct inode * inode;

	spin_lock(&inode_lock);
	inode = find_inode(sb, ino, head, find_actor, opaque);
	if (inode) {
		__iget(inode);
		spin_unlock(&inode_lock);
		wait_on_inode(inode);
		return inode;
	}
	spin_unlock(&inode_lock);

	/*
	 * get_new_inode() will do the right thing, re-trying the search
	 * in case it had to block at any point.
	 */
	return get_new_inode(sb, ino, head, find_actor, opaque);
}
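/*
 * Illustrative sketch (not part of the original file): plain iget() is
 * a wrapper around iget4() with no find_actor, roughly:
 *
 *	struct inode *iget(struct super_block *sb, unsigned long ino)
 *	{
 *		return iget4(sb, ino, NULL, NULL);
 *	}
 *
 * Filesystems whose inode numbers can collide (e.g. NFS) pass a
 * find_actor and an opaque cookie to disambiguate.
 */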
/**
 *	insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *
 *	Add an inode to the inode hash for this superblock. If the inode
 *	has no superblock it is added to a separate anonymous chain.
 */

void insert_inode_hash(struct inode *inode)
{
	struct list_head *head = &anon_hash_chain;
	if (inode->i_sb)
		head = inode_hashtable + hash(inode->i_sb, inode->i_ino);
	spin_lock(&inode_lock);
	list_add(&inode->i_hash, head);
	spin_unlock(&inode_lock);
}
/**
 *	remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock or anonymous hash.
 */

void remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_lock);
	list_del(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_hash);
	spin_unlock(&inode_lock);
}
/**
 *	iput - put an inode
 *	@inode: inode to put
 *
 *	Puts an inode, dropping its usage count. If the inode use count hits
 *	zero the inode is also then freed and may be destroyed.
 */

void iput(struct inode *inode)
{
	if (inode) {
		struct super_operations *op = NULL;
		int destroy = 0;

		if (inode->i_sb && inode->i_sb->s_op)
			op = inode->i_sb->s_op;
		if (op && op->put_inode)
			op->put_inode(inode);

		spin_lock(&inode_lock);
		if (atomic_dec_and_test(&inode->i_count)) {
			if (!inode->i_nlink) {
				list_del(&inode->i_hash);
				INIT_LIST_HEAD(&inode->i_hash);
				list_del(&inode->i_list);
				INIT_LIST_HEAD(&inode->i_list);
				inode->i_state |= I_FREEING;
				spin_unlock(&inode_lock);

				if (inode->i_data.nrpages)
					truncate_inode_pages(&inode->i_data, 0);

				destroy = 1;
				if (op && op->delete_inode) {
					void (*delete)(struct inode *) = op->delete_inode;
					/* s_op->delete_inode internally recalls clear_inode() */
					delete(inode);
				} else
					clear_inode(inode);
				if (inode->i_state != I_CLEAR)
					BUG();

				spin_lock(&inode_lock);
			} else {
				if (!list_empty(&inode->i_hash)) {
					if (!(inode->i_state & I_DIRTY)) {
						list_del(&inode->i_list);
						list_add(&inode->i_list,
							 &inode_unused);
					}
					inodes_stat.nr_unused++;
				} else {
					/* magic nfs path */
					list_del(&inode->i_list);
					INIT_LIST_HEAD(&inode->i_list);
					inode->i_state |= I_FREEING;
					spin_unlock(&inode_lock);
					clear_inode(inode);
					destroy = 1;
					spin_lock(&inode_lock);
				}
			}
#ifdef INODE_PARANOIA
			if (inode->i_flock)
				printk(KERN_ERR "iput: inode %s/%ld still has locks!\n",
					kdevname(inode->i_dev), inode->i_ino);
			if (!list_empty(&inode->i_dentry))
				printk(KERN_ERR "iput: device %s inode %ld still has aliases!\n",
					kdevname(inode->i_dev), inode->i_ino);
			if (atomic_read(&inode->i_count))
				printk(KERN_ERR "iput: device %s inode %ld count changed, count=%d\n",
					kdevname(inode->i_dev), inode->i_ino, atomic_read(&inode->i_count));
			if (atomic_read(&inode->i_sem.count) != 1)
				printk(KERN_ERR "iput: Aieee, semaphore in use inode %s/%ld, count=%d\n",
					kdevname(inode->i_dev), inode->i_ino, atomic_read(&inode->i_sem.count));
#endif
		}
		if ((unsigned)atomic_read(&inode->i_count) > (1U<<31)) {
			printk(KERN_ERR "iput: inode %s/%ld count wrapped\n",
				kdevname(inode->i_dev), inode->i_ino);
		}
		spin_unlock(&inode_lock);
		if (destroy)
			destroy_inode(inode);
	}
}
void force_delete(struct inode *inode)
{
	/*
	 * Kill off unused inodes ... iput() will unhash and
	 * delete the inode if we set i_nlink to zero.
	 */
	if (atomic_read(&inode->i_count) == 1)
		inode->i_nlink = 0;
}
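/*
 * Illustrative sketch (not part of the original file): an in-core-only
 * filesystem that never wants its inodes cached unused can plug this
 * in as its put_inode operation:
 *
 *	static struct super_operations example_ops = {
 *		put_inode:	force_delete,	-- hypothetical fs
 *	};
 */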
/**
 *	bmap - find a block number in a file
 *	@inode: inode of file
 *	@block: block to find
 *
 *	Returns the disk block number, relative to the start of the
 *	device holding the inode, for the requested block of the file.
 *	That is, asked for block 4 of inode 1 the function will return
 *	the disk block that holds that block of the file.
 */

int bmap(struct inode * inode, int block)
{
	int res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
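/*
 * Illustrative sketch (not part of the original file): the FIBMAP
 * ioctl is the classic caller, mapping a user-supplied logical block
 * through bmap():
 *
 *	int block = 4;
 *	int phys = bmap(inode, block);	-- 0 means hole or no ->bmap
 */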
/*
 * Initialize the hash tables.
 */
void __init inode_init(unsigned long mempages)
{
	struct list_head *head;
	unsigned long order;
	unsigned int nr_hash;
	int i;

	mempages >>= (14 - PAGE_SHIFT);
	mempages *= sizeof(struct list_head);
	for (order = 0; ((1UL << order) << PAGE_SHIFT) < mempages; order++)
		;

	do {
		unsigned long tmp;

		nr_hash = (1UL << order) * PAGE_SIZE /
			sizeof(struct list_head);
		i_hash_mask = (nr_hash - 1);

		tmp = nr_hash;
		i_hash_shift = 0;
		while ((tmp >>= 1UL) != 0UL)
			i_hash_shift++;

		inode_hashtable = (struct list_head *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (inode_hashtable == NULL && --order >= 0);

	printk("Inode-cache hash table entries: %d (order: %ld, %ld bytes)\n",
			nr_hash, order, (PAGE_SIZE << order));

	if (!inode_hashtable)
		panic("Failed to allocate inode hash table\n");

	head = inode_hashtable;
	i = nr_hash;
	do {
		INIT_LIST_HEAD(head);
		head++;
		i--;
	} while (i);

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
					 0, SLAB_HWCACHE_ALIGN, init_once,
					 NULL);
	if (!inode_cachep)
		panic("cannot create inode slab cache");
}
/**
 *	update_atime - update the access time
 *	@inode: inode accessed
 *
 *	Update the accessed time on an inode and mark it for writeback.
 *	This function automatically handles read only file systems and media,
 *	as well as the "noatime" flag and inode specific "noatime" markers.
 */

void update_atime (struct inode *inode)
{
	if ( IS_NOATIME (inode) ) return;
	if ( IS_NODIRATIME (inode) && S_ISDIR (inode->i_mode) ) return;
	if ( IS_RDONLY (inode) ) return;
	inode->i_atime = CURRENT_TIME;
	mark_inode_dirty (inode);
}   /*  End Function update_atime  */
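/*
 * Illustrative sketch (not part of the original file): read paths
 * typically reach this through the UPDATE_ATIME() wrapper from fs.h
 * after a successful read:
 *
 *	UPDATE_ATIME(file->f_dentry->d_inode);
 *
 * which simply expands to update_atime(inode).
 */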
/*
 *	Quota functions that want to walk the inode lists..
 */
#ifdef CONFIG_QUOTA

/* Functions back in dquot.c */
void put_dquot_list(struct list_head *);
int remove_inode_dquot_ref(struct inode *, short, struct list_head *);

void remove_dquot_ref(kdev_t dev, short type)
{
	struct super_block *sb = get_super(dev);
	struct inode *inode;
	struct list_head *act_head;
	LIST_HEAD(tofree_head);

	if (!sb || !sb->dq_op)
		return;	/* nothing to do */

	/* We have to be protected against other CPUs */
	spin_lock(&inode_lock);

	for (act_head = inode_in_use.next; act_head != &inode_in_use; act_head = act_head->next) {
		inode = list_entry(act_head, struct inode, i_list);
		if (inode->i_sb != sb || !IS_QUOTAINIT(inode))
			continue;
		remove_inode_dquot_ref(inode, type, &tofree_head);
	}
	for (act_head = inode_unused.next; act_head != &inode_unused; act_head = act_head->next) {
		inode = list_entry(act_head, struct inode, i_list);
		if (inode->i_sb != sb || !IS_QUOTAINIT(inode))
			continue;
		remove_inode_dquot_ref(inode, type, &tofree_head);
	}
	for (act_head = sb->s_dirty.next; act_head != &sb->s_dirty; act_head = act_head->next) {
		inode = list_entry(act_head, struct inode, i_list);
		if (!IS_QUOTAINIT(inode))
			continue;
		remove_inode_dquot_ref(inode, type, &tofree_head);
	}
	spin_unlock(&inode_lock);

	put_dquot_list(&tofree_head);
}
#endif