/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/cache.h>

/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 *
 * Famous last words.
 */

/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */

#define INODE_PARANOIA 1
/* #define INODE_DEBUG 1 */

/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define I_HASHBITS      i_hash_shift
#define I_HASHMASK      i_hash_mask

static unsigned int i_hash_mask;
static unsigned int i_hash_shift;

/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, i_count > 0, i_nlink > 0
 *  "dirty"  - as "in_use" but also dirty
 *  "unused" - valid inode, i_count = 0
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */

static LIST_HEAD(inode_in_use);
static LIST_HEAD(inode_unused);
static struct list_head *inode_hashtable;
static LIST_HEAD(anon_hash_chain); /* for inodes with NULL i_sb */

/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;

/*
 * Statistics gathering..
 */
struct {
        int nr_inodes;
        int nr_unused;
        int dummy[5];
} inodes_stat = {0, 0,};

static kmem_cache_t * inode_cachep;

#define alloc_inode() \
        ((struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL))
#define destroy_inode(inode) kmem_cache_free(inode_cachep, (inode))

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache be aware of that.
 */
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
{
        struct inode * inode = (struct inode *) foo;

        if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR)
        {
                memset(inode, 0, sizeof(*inode));
                init_waitqueue_head(&inode->i_wait);
                INIT_LIST_HEAD(&inode->i_hash);
                INIT_LIST_HEAD(&inode->i_data.pages);
                INIT_LIST_HEAD(&inode->i_dentry);
                INIT_LIST_HEAD(&inode->i_dirty_buffers);
                sema_init(&inode->i_sem, 1);
                sema_init(&inode->i_zombie, 1);
                spin_lock_init(&inode->i_data.i_shared_lock);
        }
}

/*
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but
 * move it onto the dirty list only if it is hashed.
 * If it was not hashed, it will never be added to
 * the dirty list even if it is later hashed, as it
 * will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_
 * you start marking them dirty..
 */

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 *
 * Mark an inode as dirty. Callers should use mark_inode_dirty.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
        struct super_block * sb = inode->i_sb;

        if (sb) {
                spin_lock(&inode_lock);
                if ((inode->i_state & flags) != flags) {
                        inode->i_state |= flags;
                        /* Only add valid (ie hashed) inodes to the dirty list */
                        if (!list_empty(&inode->i_hash)) {
                                list_del(&inode->i_list);
                                list_add(&inode->i_list, &sb->s_dirty);
                        }
                }
                spin_unlock(&inode_lock);
        }
}

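/*
 * Illustration only (not built): the hash-before-dirty rule above matters
 * to filesystem code that creates new inodes.  A minimal sketch, assuming
 * a hypothetical "examplefs" that fills in a fresh inode, hashes it, and
 * only then marks it dirty so it lands on the superblock's s_dirty list:
 */
#if 0
static struct inode * examplefs_new_inode(struct super_block *sb, unsigned long ino)
{
        struct inode * inode = get_empty_inode();

        if (inode) {
                inode->i_sb = sb;
                inode->i_dev = sb->s_dev;
                inode->i_ino = ino;
                insert_inode_hash(inode);       /* hash first ... */
                mark_inode_dirty(inode);        /* ... then mark dirty */
        }
        return inode;
}
#endif
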
static void __wait_on_inode(struct inode * inode)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&inode->i_wait, &wait);
repeat:
        set_current_state(TASK_UNINTERRUPTIBLE);
        if (inode->i_state & I_LOCK) {
                schedule();
                goto repeat;
        }
        remove_wait_queue(&inode->i_wait, &wait);
        current->state = TASK_RUNNING;
}

static inline void wait_on_inode(struct inode *inode)
{
        if (inode->i_state & I_LOCK)
                __wait_on_inode(inode);
}

static inline void write_inode(struct inode *inode, int wait)
{
        if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->write_inode)
                inode->i_sb->s_op->write_inode(inode, wait);
}

static inline void __iget(struct inode * inode)
{
        if (atomic_read(&inode->i_count)) {
                atomic_inc(&inode->i_count);
                return;
        }
        atomic_inc(&inode->i_count);
        if (!(inode->i_state & I_DIRTY)) {
                list_del(&inode->i_list);
                list_add(&inode->i_list, &inode_in_use);
        }
        inodes_stat.nr_unused--;
}

static inline void sync_one(struct inode *inode, int wait)
{
        if (inode->i_state & I_LOCK) {
                __iget(inode);
                spin_unlock(&inode_lock);
                __wait_on_inode(inode);
                iput(inode);
                spin_lock(&inode_lock);
        } else {
                list_del(&inode->i_list);
                list_add(&inode->i_list, atomic_read(&inode->i_count)
                                                        ? &inode_in_use
                                                        : &inode_unused);
                /* Set I_LOCK, reset I_DIRTY */
                inode->i_state |= I_LOCK;
                inode->i_state &= ~I_DIRTY;
                spin_unlock(&inode_lock);

                write_inode(inode, wait);

                spin_lock(&inode_lock);
                inode->i_state &= ~I_LOCK;
                wake_up(&inode->i_wait);
        }
}

static inline void sync_list(struct list_head *head)
{
        struct list_head * tmp;

        while ((tmp = head->prev) != head)
                sync_one(list_entry(tmp, struct inode, i_list), 0);
}

/**
 * sync_inodes
 * @dev: device to sync the inodes from.
 *
 * sync_inodes goes through the super block's dirty list,
 * writes them out, and puts them back on the normal list.
 */
void sync_inodes(kdev_t dev)
{
        struct super_block * sb = sb_entry(super_blocks.next);

        /*
         * Search the super_blocks array for the device(s) to sync.
         */
        spin_lock(&inode_lock);
        for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
                if (!sb->s_dev)
                        continue;
                if (dev && sb->s_dev != dev)
                        continue;

                sync_list(&sb->s_dirty);

                if (dev)
                        break;
        }
        spin_unlock(&inode_lock);
}

/*
 * Called with the spinlock already held..
 */
static void sync_all_inodes(void)
{
        struct super_block * sb = sb_entry(super_blocks.next);
        for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
                if (!sb->s_dev)
                        continue;
                sync_list(&sb->s_dirty);
        }
}

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @wait: if set, we wait for the write to complete on disk
 *
 * This function commits an inode to disk immediately if it is
 * dirty. This is primarily needed by knfsd.
 */
void write_inode_now(struct inode *inode, int wait)
{
        struct super_block * sb = inode->i_sb;

        if (sb) {
                spin_lock(&inode_lock);
                while (inode->i_state & I_DIRTY)
                        sync_one(inode, wait);
                spin_unlock(&inode_lock);
        }
        else
                printk("write_inode_now: no super block\n");
}

/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @datasync: if set, don't bother flushing timestamps
 *
 * This is called by generic_file_write for files which have the O_SYNC
 * flag set, to flush dirty writes to disk.
 */
int generic_osync_inode(struct inode *inode, int datasync)
{
        int err;

        /*
         * WARNING
         *
         * Currently, the filesystem write path does not pass the
         * filp down to the low-level write functions.  Therefore it
         * is impossible for (say) __block_commit_write to know if
         * the operation is O_SYNC or not.
         *
         * Ideally, O_SYNC writes would have the filesystem call
         * ll_rw_block as it went to kick-start the writes, and we
         * could call osync_inode_buffers() here to wait only for
         * those IOs which have already been submitted to the device
         * driver layer.  As it stands, if we did this we'd not write
         * anything to disk since our writes have not been queued by
         * this point: they are still on the dirty LRU.
         *
         * So, currently we will call fsync_inode_buffers() instead,
         * to flush _all_ dirty buffers for this inode to disk on
         * every O_SYNC write, not just the synchronous I/Os.  --sct
         */

#ifdef WRITERS_QUEUE_IO
        err = osync_inode_buffers(inode);
#else
        err = fsync_inode_buffers(inode);
#endif

        spin_lock(&inode_lock);
        if (!(inode->i_state & I_DIRTY))
                goto out;
        if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
                goto out;
        spin_unlock(&inode_lock);
        write_inode_now(inode, 1);
        return err;

 out:
        spin_unlock(&inode_lock);
        return err;
}

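/*
 * Illustration only (not built): a sketch of the caller described above --
 * a write path that forces dirty data out once the copy has succeeded and
 * the file was opened O_SYNC.  All names except generic_osync_inode are
 * hypothetical.
 */
#if 0
static ssize_t examplefs_file_write(struct file *file, const char *buf,
                                    size_t count, loff_t *ppos)
{
        struct inode *inode = file->f_dentry->d_inode;
        ssize_t written = examplefs_do_write(file, buf, count, ppos);

        if (written > 0 && (file->f_flags & O_SYNC)) {
                int err = generic_osync_inode(inode, 1);
                if (err)
                        written = err;
        }
        return written;
}
#endif
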
/**
 * clear_inode - clear an inode
 * @inode: inode to clear
 *
 * This is called by the filesystem to tell us
 * that the inode is no longer useful. We just
 * terminate it with extreme prejudice.
 */
void clear_inode(struct inode *inode)
{
        if (inode->i_data.nrpages)
                BUG();
        if (!(inode->i_state & I_FREEING))
                BUG();
        if (inode->i_state & I_CLEAR)
                BUG();
        wait_on_inode(inode);
        if (IS_QUOTAINIT(inode))
                DQUOT_DROP(inode);
        if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->clear_inode)
                inode->i_sb->s_op->clear_inode(inode);
        if (inode->i_bdev) {
                bdput(inode->i_bdev);
                inode->i_bdev = NULL;
        }
        inode->i_state = I_CLEAR;
}

/*
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head * head)
{
        struct list_head * inode_entry;
        struct inode * inode;

        while ((inode_entry = head->next) != head)
        {
                list_del(inode_entry);

                inode = list_entry(inode_entry, struct inode, i_list);
                if (inode->i_data.nrpages)
                        truncate_all_inode_pages(&inode->i_data);
                clear_inode(inode);
                destroy_inode(inode);
        }
}

/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose)
{
        struct list_head *next;
        int busy = 0, count = 0;

        next = head->next;
        for (;;) {
                struct list_head * tmp = next;
                struct inode * inode;

                next = next->next;
                if (tmp == head)
                        break;
                inode = list_entry(tmp, struct inode, i_list);
                if (inode->i_sb != sb)
                        continue;
                invalidate_inode_buffers(inode);
                if (!atomic_read(&inode->i_count)) {
                        list_del(&inode->i_hash);
                        INIT_LIST_HEAD(&inode->i_hash);
                        list_del(&inode->i_list);
                        list_add(&inode->i_list, dispose);
                        inode->i_state |= I_FREEING;
                        count++;
                        continue;
                }
                busy = 1;
        }
        /* only unused inodes may be cached with i_count zero */
        inodes_stat.nr_unused -= count;
        return busy;
}

/*
 * This is a two-stage process. First we collect all
 * offending inodes onto the throw-away list, and in
 * the second stage we actually dispose of them. This
 * is because we don't want to sleep while messing
 * with the global lists..
 */

/**
 * invalidate_inodes - discard the inodes on a device
 * @sb: superblock
 *
 * Discard all of the inodes for a given superblock. If the discard
 * fails because there are busy inodes then a non zero value is returned.
 * If the discard is successful all the inodes have been discarded.
 */
int invalidate_inodes(struct super_block * sb)
{
        int busy;
        LIST_HEAD(throw_away);

        spin_lock(&inode_lock);
        busy = invalidate_list(&inode_in_use, sb, &throw_away);
        busy |= invalidate_list(&inode_unused, sb, &throw_away);
        busy |= invalidate_list(&sb->s_dirty, sb, &throw_away);
        spin_unlock(&inode_lock);

        dispose_list(&throw_away);

        return busy;
}

/*
 * This takes the inode lock itself. It searches the unused list
 * for freeable inodes, which are moved to a temporary list and
 * then destroyed by dispose_list.
 *
 * We don't expect to have to call this very often.
 *
 * N.B. The spinlock is dropped before the call to
 *      dispose_list.
 */
#define CAN_UNUSE(inode) \
        ((((inode)->i_state | (inode)->i_data.nrpages) == 0)  && \
         !inode_has_buffers(inode))
#define INODE(entry)    (list_entry(entry, struct inode, i_list))

void prune_icache(int goal)
{
        LIST_HEAD(list);
        struct list_head *entry, *freeable = &list;
        int count = 0;
        struct inode * inode;

        spin_lock(&inode_lock);
        /* go simple and safe syncing everything before starting */
        sync_all_inodes();

        entry = inode_unused.prev;
        while (entry != &inode_unused)
        {
                struct list_head *tmp = entry;

                entry = entry->prev;
                inode = INODE(tmp);
                if (inode->i_state & (I_FREEING|I_CLEAR))
                        BUG();
                if (!CAN_UNUSE(inode))
                        continue;
                if (atomic_read(&inode->i_count))
                        BUG();
                list_del(tmp);
                list_del(&inode->i_hash);
                INIT_LIST_HEAD(&inode->i_hash);
                list_add(tmp, freeable);
                inode->i_state |= I_FREEING;
                count++;
                if (!--goal)
                        break;
        }
        inodes_stat.nr_unused -= count;
        spin_unlock(&inode_lock);

        dispose_list(freeable);
}

int shrink_icache_memory(int priority, int gfp_mask)
{
        int count = 0;

        if (priority)
                count = inodes_stat.nr_unused / priority;
        prune_icache(count);
        /* FIXME: kmem_cache_shrink here should tell us
           the number of pages freed, and it should
           honour __GFP_DMA/__GFP_HIGHMEM and free only
           the pages relevant to the needs of the
           current allocation. */
        kmem_cache_shrink(inode_cachep);

        return 0;
}

/*
 * Called with the inode lock held.
 * NOTE: we are not increasing the inode-refcount, you must call __iget()
 * by hand after calling find_inode now! This simplifies iunique and won't
 * add any additional branch in the common code.
 */
static struct inode * find_inode(struct super_block * sb, unsigned long ino, struct list_head *head, find_inode_t find_actor, void *opaque)
{
        struct list_head *tmp;
        struct inode * inode;

        tmp = head;
        for (;;) {
                tmp = tmp->next;
                inode = NULL;
                if (tmp == head)
                        break;
                inode = list_entry(tmp, struct inode, i_hash);
                if (inode->i_sb != sb)
                        continue;
                if (inode->i_ino != ino)
                        continue;
                if (find_actor && !find_actor(inode, ino, opaque))
                        continue;
                break;
        }
        return inode;
}

/*
 * This just initializes the inode fields
 * to known values before returning the inode..
 *
 * i_sb, i_ino, i_count, i_state and the lists have
 * been initialized elsewhere..
 */
static void clean_inode(struct inode *inode)
{
        static struct address_space_operations empty_aops = {};
        static struct inode_operations empty_iops = {};
        static struct file_operations empty_fops = {};
        memset(&inode->u, 0, sizeof(inode->u));
        inode->i_sock = 0;
        inode->i_op = &empty_iops;
        inode->i_fop = &empty_fops;
        inode->i_nlink = 1;
        atomic_set(&inode->i_writecount, 0);
        inode->i_size = 0;
        inode->i_generation = 0;
        memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
        inode->i_pipe = NULL;
        inode->i_bdev = NULL;
        inode->i_data.a_ops = &empty_aops;
        inode->i_data.host = (void*)inode;
        inode->i_mapping = &inode->i_data;
}

/**
 * get_empty_inode - obtain an inode
 *
 * This is called by things like the networking layer
 * etc that want to get an inode without any inode
 * number, or filesystems that allocate new inodes with
 * no pre-existing information.
 *
 * On a successful return the inode pointer is returned. On a failure
 * a %NULL pointer is returned. The returned inode is not on any superblock
 * lists.
 */
struct inode * get_empty_inode(void)
{
        static unsigned long last_ino = 0;
        struct inode * inode;

        inode = alloc_inode();
        if (inode)
        {
                spin_lock(&inode_lock);
                list_add(&inode->i_list, &inode_in_use);
                inode->i_sb = NULL;
                inode->i_dev = 0;
                inode->i_ino = ++last_ino;
                inode->i_flags = 0;
                atomic_set(&inode->i_count, 1);
                inode->i_state = 0;
                spin_unlock(&inode_lock);
                clean_inode(inode);
        }
        return inode;
}

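/*
 * Illustration only (not built): anonymous users such as pipes and sockets
 * are the typical callers. A minimal sketch with hypothetical names,
 * filling in the few fields such a caller actually cares about:
 */
#if 0
static struct inode * example_anon_inode(void)
{
        struct inode * inode = get_empty_inode();

        if (inode) {
                inode->i_mode = S_IFIFO | S_IRUSR | S_IWUSR;
                inode->i_uid = current->fsuid;
                inode->i_gid = current->fsgid;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        }
        return inode;
}
#endif
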
/*
 * This is called without the inode lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *      -- rmk@arm.uk.linux.org
 */
static struct inode * get_new_inode(struct super_block *sb, unsigned long ino, struct list_head *head, find_inode_t find_actor, void *opaque)
{
        struct inode * inode;

        inode = alloc_inode();
        if (inode) {
                struct inode * old;

                spin_lock(&inode_lock);
                /* We released the lock, so.. */
                old = find_inode(sb, ino, head, find_actor, opaque);
                if (!old) {
                        list_add(&inode->i_list, &inode_in_use);
                        list_add(&inode->i_hash, head);
                        inode->i_sb = sb;
                        inode->i_dev = sb->s_dev;
                        inode->i_ino = ino;
                        inode->i_flags = 0;
                        atomic_set(&inode->i_count, 1);
                        inode->i_state = I_LOCK;
                        spin_unlock(&inode_lock);

                        clean_inode(inode);
                        sb->s_op->read_inode(inode);

                        /*
                         * This is special! We do not need the spinlock
                         * when clearing I_LOCK, because we're guaranteed
                         * that nobody else tries to do anything about the
                         * state of the inode when it is locked, as we
                         * just created it (so there can be no old holders
                         * that haven't tested I_LOCK).
                         */
                        inode->i_state &= ~I_LOCK;
                        wake_up(&inode->i_wait);

                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us. Use the old inode instead of the one we just
                 * allocated.
                 */
                __iget(old);
                spin_unlock(&inode_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
        }
        return inode;
}

static inline unsigned long hash(struct super_block *sb, unsigned long i_ino)
{
        unsigned long tmp = i_ino | ((unsigned long) sb / L1_CACHE_BYTES);
        tmp = tmp + (tmp >> I_HASHBITS) + (tmp >> I_HASHBITS*2);
        return tmp & I_HASHMASK;
}

/* Yeah, I know about quadratic hash. Maybe, later. */

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
        static ino_t counter = 0;
        struct inode *inode;
        struct list_head * head;
        ino_t res;
        spin_lock(&inode_lock);
retry:
        if (counter > max_reserved) {
                head = inode_hashtable + hash(sb,counter);
                inode = find_inode(sb, res = counter++, head, NULL, NULL);
                if (!inode) {
                        spin_unlock(&inode_lock);
                        return res;
                }
        } else {
                counter = max_reserved + 1;
        }
        goto retry;
}

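/*
 * Illustration only (not built): a filesystem with no stable on-disk inode
 * numbers can hand out synthetic ones. Hypothetical sketch, treating inode
 * numbers 0 and 1 as reserved:
 */
#if 0
static void examplefs_assign_ino(struct inode *inode)
{
        inode->i_ino = iunique(inode->i_sb, 1);
}
#endif
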
struct inode *igrab(struct inode *inode)
{
        spin_lock(&inode_lock);
        if (!(inode->i_state & I_FREEING))
                __iget(inode);
        else
                /*
                 * Handle the case where s_op->clear_inode has not been
                 * called yet, and somebody is calling igrab
                 * while the inode is getting freed.
                 */
                inode = NULL;
        spin_unlock(&inode_lock);
        if (inode)
                wait_on_inode(inode);
        return inode;
}

struct inode *iget4(struct super_block *sb, unsigned long ino, find_inode_t find_actor, void *opaque)
{
        struct list_head * head = inode_hashtable + hash(sb,ino);
        struct inode * inode;

        spin_lock(&inode_lock);
        inode = find_inode(sb, ino, head, find_actor, opaque);
        if (inode) {
                __iget(inode);
                spin_unlock(&inode_lock);
                wait_on_inode(inode);
                return inode;
        }
        spin_unlock(&inode_lock);

        /*
         * get_new_inode() will do the right thing, re-trying the search
         * in case it had to block at any point.
         */
        return get_new_inode(sb, ino, head, find_actor, opaque);
}

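/*
 * Illustration only (not built): most callers have no comparison callback
 * and simply pass NULL for the find_actor and opaque arguments; the common
 * iget() helper amounts to the sketch below.
 */
#if 0
static inline struct inode * example_iget(struct super_block *sb, unsigned long ino)
{
        return iget4(sb, ino, NULL, NULL);
}
#endif
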
/**
 * insert_inode_hash - hash an inode
 * @inode: unhashed inode
 *
 * Add an inode to the inode hash for this superblock. If the inode
 * has no superblock it is added to a separate anonymous chain.
 */
void insert_inode_hash(struct inode *inode)
{
        struct list_head *head = &anon_hash_chain;
        if (inode->i_sb)
                head = inode_hashtable + hash(inode->i_sb, inode->i_ino);
        spin_lock(&inode_lock);
        list_add(&inode->i_hash, head);
        spin_unlock(&inode_lock);
}

/**
 * remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock or anonymous hash.
 */
void remove_inode_hash(struct inode *inode)
{
        spin_lock(&inode_lock);
        list_del(&inode->i_hash);
        INIT_LIST_HEAD(&inode->i_hash);
        spin_unlock(&inode_lock);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero the inode is also then freed and may be destroyed.
 */
void iput(struct inode *inode)
{
        if (inode) {
                struct super_operations *op = NULL;
                int destroy = 0;

                if (inode->i_sb && inode->i_sb->s_op)
                        op = inode->i_sb->s_op;
                if (op && op->put_inode)
                        op->put_inode(inode);

                spin_lock(&inode_lock);
                if (atomic_dec_and_test(&inode->i_count)) {
                        if (!inode->i_nlink) {
                                list_del(&inode->i_hash);
                                INIT_LIST_HEAD(&inode->i_hash);
                                list_del(&inode->i_list);
                                INIT_LIST_HEAD(&inode->i_list);
                                inode->i_state |= I_FREEING;
                                spin_unlock(&inode_lock);

                                if (inode->i_data.nrpages)
                                        truncate_all_inode_pages(&inode->i_data);

                                destroy = 1;
                                if (op && op->delete_inode) {
                                        void (*delete)(struct inode *) = op->delete_inode;
                                        /* s_op->delete_inode internally recalls clear_inode() */
                                        delete(inode);
                                } else
                                        clear_inode(inode);
                                if (inode->i_state != I_CLEAR)
                                        BUG();

                                spin_lock(&inode_lock);
                        } else {
                                if (!list_empty(&inode->i_hash)) {
                                        if (!(inode->i_state & I_DIRTY)) {
                                                list_del(&inode->i_list);
                                                list_add(&inode->i_list,
                                                         &inode_unused);
                                        }
                                        inodes_stat.nr_unused++;
                                } else {
                                        /* magic nfs path */
                                        list_del(&inode->i_list);
                                        INIT_LIST_HEAD(&inode->i_list);
                                        inode->i_state |= I_FREEING;
                                        spin_unlock(&inode_lock);
                                        clear_inode(inode);
                                        destroy = 1;
                                        spin_lock(&inode_lock);
                                }
                        }
#ifdef INODE_PARANOIA
                        if (inode->i_flock)
                                printk(KERN_ERR "iput: inode %s/%ld still has locks!\n",
                                        kdevname(inode->i_dev), inode->i_ino);
                        if (!list_empty(&inode->i_dentry))
                                printk(KERN_ERR "iput: device %s inode %ld still has aliases!\n",
                                        kdevname(inode->i_dev), inode->i_ino);
                        if (atomic_read(&inode->i_count))
                                printk(KERN_ERR "iput: device %s inode %ld count changed, count=%d\n",
                                        kdevname(inode->i_dev), inode->i_ino, atomic_read(&inode->i_count));
                        if (atomic_read(&inode->i_sem.count) != 1)
                                printk(KERN_ERR "iput: Aieee, semaphore in use inode %s/%ld, count=%d\n",
                                        kdevname(inode->i_dev), inode->i_ino, atomic_read(&inode->i_sem.count));
#endif
                }
                if ((unsigned)atomic_read(&inode->i_count) > (1U<<31)) {
                        printk(KERN_ERR "iput: inode %s/%ld count wrapped\n",
                                kdevname(inode->i_dev), inode->i_ino);
                }
                spin_unlock(&inode_lock);
                if (destroy)
                        destroy_inode(inode);
        }
}

void force_delete(struct inode *inode)
{
        /*
         * Kill off unused inodes ... iput() will unhash and
         * delete the inode if we set i_nlink to zero.
         */
        if (atomic_read(&inode->i_count) == 1)
                inode->i_nlink = 0;
}

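/*
 * Illustration only (not built): filesystems whose inodes live purely in
 * the inode cache (ramfs-style) typically install force_delete as their
 * put_inode operation, so an inode is freed as soon as its last user
 * does iput(). Hypothetical sketch:
 */
#if 0
static struct super_operations examplefs_ops = {
        put_inode:      force_delete,
};
#endif
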
/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
int bmap(struct inode * inode, int block)
{
        int res = 0;
        if (inode->i_mapping->a_ops->bmap)
                res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
        return res;
}

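/*
 * Illustration only (not built): a FIBMAP-style caller mapping logical
 * block 0 of a file to its on-disk block. A result of 0 means a hole, or
 * a filesystem whose address_space does not implement ->bmap.
 */
#if 0
static int example_first_block(struct inode *inode)
{
        return bmap(inode, 0);
}
#endif
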
/*
 * Initialize the hash tables.
 */
void __init inode_init(unsigned long mempages)
{
        struct list_head *head;
        unsigned long order;
        unsigned int nr_hash;
        int i;

        mempages >>= (14 - PAGE_SHIFT);
        mempages *= sizeof(struct list_head);
        for (order = 0; ((1UL << order) << PAGE_SHIFT) < mempages; order++)
                ;

        do {
                unsigned long tmp;

                nr_hash = (1UL << order) * PAGE_SIZE /
                        sizeof(struct list_head);
                i_hash_mask = (nr_hash - 1);

                tmp = nr_hash;
                i_hash_shift = 0;
                while ((tmp >>= 1UL) != 0UL)
                        i_hash_shift++;

                inode_hashtable = (struct list_head *)
                        __get_free_pages(GFP_ATOMIC, order);
        } while (inode_hashtable == NULL && --order >= 0);

        printk("Inode-cache hash table entries: %d (order: %ld, %ld bytes)\n",
                        nr_hash, order, (PAGE_SIZE << order));

        if (!inode_hashtable)
                panic("Failed to allocate inode hash table\n");

        head = inode_hashtable;
        i = nr_hash;
        do {
                INIT_LIST_HEAD(head);
                head++;
                i--;
        } while (i);

        /* inode slab cache */
        inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
                                         0, SLAB_HWCACHE_ALIGN, init_once,
                                         NULL);
        if (!inode_cachep)
                panic("cannot create inode slab cache");
}

/**
 * update_atime - update the access time
 * @inode: inode accessed
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
void update_atime (struct inode *inode)
{
        if ( IS_NOATIME (inode) ) return;
        if ( IS_NODIRATIME (inode) && S_ISDIR (inode->i_mode) ) return;
        if ( IS_RDONLY (inode) ) return;
        inode->i_atime = CURRENT_TIME;
        mark_inode_dirty_sync (inode);
}   /*  End Function update_atime  */

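/*
 * Illustration only (not built): a typical caller is a read path touching
 * the access time after a successful read. Hypothetical sketch:
 */
#if 0
static ssize_t examplefs_read(struct file *file, char *buf, size_t count, loff_t *ppos)
{
        ssize_t ret = examplefs_do_read(file, buf, count, ppos);

        if (ret > 0)
                update_atime(file->f_dentry->d_inode);
        return ret;
}
#endif
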
/*
 * Quota functions that want to walk the inode lists..
 */
#ifdef CONFIG_QUOTA

/* Functions back in dquot.c */
void put_dquot_list(struct list_head *);
int remove_inode_dquot_ref(struct inode *, short, struct list_head *);

void remove_dquot_ref(kdev_t dev, short type)
{
        struct super_block *sb = get_super(dev);
        struct inode *inode;
        struct list_head *act_head;
        LIST_HEAD(tofree_head);

        if (!sb || !sb->dq_op)
                return; /* nothing to do */

        /* We have to be protected against other CPUs */
        spin_lock(&inode_lock);

        for (act_head = inode_in_use.next; act_head != &inode_in_use; act_head = act_head->next) {
                inode = list_entry(act_head, struct inode, i_list);
                if (inode->i_sb != sb || !IS_QUOTAINIT(inode))
                        continue;
                remove_inode_dquot_ref(inode, type, &tofree_head);
        }
        for (act_head = inode_unused.next; act_head != &inode_unused; act_head = act_head->next) {
                inode = list_entry(act_head, struct inode, i_list);
                if (inode->i_sb != sb || !IS_QUOTAINIT(inode))
                        continue;
                remove_inode_dquot_ref(inode, type, &tofree_head);
        }
        for (act_head = sb->s_dirty.next; act_head != &sb->s_dirty; act_head = act_head->next) {
                inode = list_entry(act_head, struct inode, i_list);
                if (!IS_QUOTAINIT(inode))
                        continue;
                remove_inode_dquot_ref(inode, type, &tofree_head);
        }
        spin_unlock(&inode_lock);

        put_dquot_list(&tofree_head);
}

#endif