/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 */

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>

/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 *
 * Famous last words.
 */

#define INODE_PARANOIA 1
/* #define INODE_DEBUG 1 */

/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define HASH_BITS	8
#define HASH_SIZE	(1UL << HASH_BITS)
#define HASH_MASK	(HASH_SIZE-1)

/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, hashed if i_nlink > 0
 *  "dirty"  - valid inode, hashed if i_nlink > 0, dirty.
 *  "unused" - ready to be re-used. Not hashed.
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */

static LIST_HEAD(inode_in_use);
static LIST_HEAD(inode_unused);
static struct list_head inode_hashtable[HASH_SIZE];

/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;

/*
 * Statistics gathering..
 */
struct {
	int nr_inodes;
	int nr_free_inodes;
	int dummy[5];
} inodes_stat = {0, 0,};

int max_inodes;

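/*
 * max_inodes is a soft limit on the total number of inodes: it is
 * set from the amount of physical memory in inode_init() below, and
 * grow_inodes() starts reclaiming before it allocates more once the
 * limit is exceeded.  (The dummy[] padding above presumably just
 * reserves room in the exported statistics; nothing in this file
 * uses it.)
 */
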
/*
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but
 * move it onto the dirty list only if it is hashed.
 * If it was not hashed, it will never be added to
 * the dirty list even if it is later hashed, as it
 * will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_
 * you start marking them dirty..
 */
void __mark_inode_dirty(struct inode *inode)
{
	struct super_block * sb = inode->i_sb;

	if (sb) {
		spin_lock(&inode_lock);
		if (!(inode->i_state & I_DIRTY)) {
			inode->i_state |= I_DIRTY;
			/* Only add valid (ie hashed) inodes to the dirty list */
			if (!list_empty(&inode->i_hash)) {
				list_del(&inode->i_list);
				list_add(&inode->i_list, &sb->s_dirty);
			}
		}
		spin_unlock(&inode_lock);
	}
}

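/*
 * Filesystems normally reach this through the mark_inode_dirty()
 * wrapper (see <linux/fs.h>) after changing fields such as i_size
 * or the timestamps; update_atime() at the bottom of this file is
 * a minimal example.
 */
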
static void __wait_on_inode(struct inode * inode)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&inode->i_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (inode->i_state & I_LOCK) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&inode->i_wait, &wait);
	current->state = TASK_RUNNING;
}

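/*
 * Note the classic wait-queue idiom above: register on the queue,
 * mark the task sleeping, and only then re-test I_LOCK before
 * calling schedule(), so a wake_up() racing with the test cannot
 * be missed.
 */
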
static inline void wait_on_inode(struct inode *inode)
{
	if (inode->i_state & I_LOCK)
		__wait_on_inode(inode);
}

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode..
 */
static inline void init_once(struct inode * inode)
{
	memset(inode, 0, sizeof(*inode));
	init_waitqueue_head(&inode->i_wait);
	INIT_LIST_HEAD(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	sema_init(&inode->i_sem, 1);
	spin_lock_init(&inode->i_shared_lock);
}

static inline void write_inode(struct inode *inode)
{
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->write_inode)
		inode->i_sb->s_op->write_inode(inode);
}

static inline void sync_one(struct inode *inode)
{
	if (inode->i_state & I_LOCK) {
		spin_unlock(&inode_lock);
		__wait_on_inode(inode);
		spin_lock(&inode_lock);
	} else {
		list_del(&inode->i_list);
		list_add(&inode->i_list, &inode_in_use);
		/* Set I_LOCK, reset I_DIRTY */
		inode->i_state ^= I_DIRTY | I_LOCK;
		spin_unlock(&inode_lock);

		write_inode(inode);

		spin_lock(&inode_lock);
		inode->i_state &= ~I_LOCK;
		wake_up(&inode->i_wait);
	}
}

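/*
 * The XOR above relies on callers only passing dirty inodes: on
 * that branch I_DIRTY is known to be set and I_LOCK clear, so
 * flipping both bits locks the inode and clears its dirty state
 * in one step.
 */
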
static inline void sync_list(struct list_head *head)
{
	struct list_head * tmp;

	while ((tmp = head->prev) != head)
		sync_one(list_entry(tmp, struct inode, i_list));
}

/*
 * "sync_inodes()" goes through the super block's dirty list,
 * writes them out, and puts them back on the normal list.
 */
void sync_inodes(kdev_t dev)
{
	struct super_block * sb = sb_entry(super_blocks.next);

	/*
	 * Search the super_blocks array for the device(s) to sync.
	 */
	spin_lock(&inode_lock);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
		if (!sb->s_dev)
			continue;
		if (dev && sb->s_dev != dev)
			continue;

		sync_list(&sb->s_dirty);

		if (dev)
			break;
	}
	spin_unlock(&inode_lock);
}

/*
 * Called with the spinlock already held..
 */
static void sync_all_inodes(void)
{
	struct super_block * sb = sb_entry(super_blocks.next);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
		if (!sb->s_dev)
			continue;
		sync_list(&sb->s_dirty);
	}
}

/*
 * Needed by knfsd
 */
void write_inode_now(struct inode *inode)
{
	struct super_block * sb = inode->i_sb;

	if (sb) {
		spin_lock(&inode_lock);
		while (inode->i_state & I_DIRTY)
			sync_one(inode);
		spin_unlock(&inode_lock);
	}
	else
		printk("write_inode_now: no super block\n");
}

/*
 * This is called by the filesystem to tell us
 * that the inode is no longer useful. We just
 * terminate it with extreme prejudice.
 */
void clear_inode(struct inode *inode)
{
	if (inode->i_nrpages)
		BUG();
	wait_on_inode(inode);
	if (IS_QUOTAINIT(inode))
		DQUOT_DROP(inode);
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->clear_inode)
		inode->i_sb->s_op->clear_inode(inode);

	inode->i_state = 0;
}

/*
 * Dispose-list gets a local list, so it doesn't need to
 * worry about list corruption. It releases the inode lock
 * while clearing the inodes.
 */
static void dispose_list(struct list_head * head)
{
	struct list_head *next;
	int count = 0;

	spin_unlock(&inode_lock);
	next = head->next;
	for (;;) {
		struct list_head * tmp = next;
		struct inode * inode;

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_list);
		if (inode->i_nrpages)
			truncate_inode_pages(inode, 0);
		clear_inode(inode);
		count++;
	}

	/* Add them all to the unused list in one fell swoop */
	spin_lock(&inode_lock);
	list_splice(head, &inode_unused);
	inodes_stat.nr_free_inodes += count;
}

/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose)
{
	struct list_head *next;
	int busy = 0;

	next = head->next;
	for (;;) {
		struct list_head * tmp = next;
		struct inode * inode;

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_list);
		if (inode->i_sb != sb)
			continue;
		if (!inode->i_count) {
			list_del(&inode->i_hash);
			INIT_LIST_HEAD(&inode->i_hash);
			list_del(&inode->i_list);
			list_add(&inode->i_list, dispose);
			inode->i_state |= I_FREEING;
			continue;
		}
		busy = 1;
	}
	return busy;
}

/*
 * This is a two-stage process. First we collect all
 * offending inodes onto the throw-away list, and in
 * the second stage we actually dispose of them. This
 * is because we don't want to sleep while messing
 * with the global lists..
 */
int invalidate_inodes(struct super_block * sb)
{
	int busy;
	LIST_HEAD(throw_away);

	spin_lock(&inode_lock);
	busy = invalidate_list(&inode_in_use, sb, &throw_away);
	busy |= invalidate_list(&sb->s_dirty, sb, &throw_away);
	dispose_list(&throw_away);
	spin_unlock(&inode_lock);

	return busy;
}

/*
 * This is called with the inode lock held. It searches
 * the in-use list for freeable inodes, which are moved to a
 * temporary list and then placed on the unused list by
 * dispose_list.
 *
 * We don't expect to have to call this very often.
 *
 * N.B. The spinlock is released during the call to
 *      dispose_list.
 */
#define CAN_UNUSE(inode) \
	(((inode)->i_count | (inode)->i_state | (inode)->i_nrpages) == 0)
#define INODE(entry)	(list_entry(entry, struct inode, i_list))

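/*
 * CAN_UNUSE() is a branch-free test that the inode has no users
 * (i_count == 0), no pending state bits and no pages in the page
 * cache: OR-ing the three fields gives zero only if all of them
 * are zero.
 */
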
static int free_inodes(void)
{
	struct list_head list, *entry, *freeable = &list;
	int found = 0;

	INIT_LIST_HEAD(freeable);
	entry = inode_in_use.next;
	while (entry != &inode_in_use) {
		struct list_head *tmp = entry;

		entry = entry->next;
		if (!CAN_UNUSE(INODE(tmp)))
			continue;
		list_del(tmp);
		list_del(&INODE(tmp)->i_hash);
		INIT_LIST_HEAD(&INODE(tmp)->i_hash);
		list_add(tmp, freeable);
		list_entry(tmp, struct inode, i_list)->i_state = I_FREEING;
		found = 1;
	}

	if (found)
		dispose_list(freeable);

	return found;
}

/*
 * Searches the inodes list for freeable inodes,
 * shrinking the dcache before (and possibly after,
 * if we're low)
 */
static void try_to_free_inodes(int goal)
{
	/*
	 * First try to just get rid of unused inodes.
	 *
	 * If we can't reach our goal that way, we'll have
	 * to try to shrink the dcache and sync existing
	 * inodes..
	 */
	free_inodes();
	goal -= inodes_stat.nr_free_inodes;
	if (goal > 0) {
		spin_unlock(&inode_lock);
		select_dcache(goal, 0);
		prune_dcache(goal);
		spin_lock(&inode_lock);
		sync_all_inodes();
		free_inodes();
	}
}

/*
 * This is the externally visible routine for
 * inode memory management.
 */
void free_inode_memory(int goal)
{
	spin_lock(&inode_lock);
	free_inodes();
	spin_unlock(&inode_lock);
}

/*
 * This is called with the spinlock held, but releases
 * the lock when freeing or allocating inodes.
 * Look out! This returns with the inode lock held if
 * it got an inode..
 *
 * We do inode allocations two pages at a time to reduce
 * fragmentation.
 */
#define INODE_PAGE_ORDER	1
#define INODE_ALLOCATION_SIZE	(PAGE_SIZE << INODE_PAGE_ORDER)
#define INODES_PER_ALLOCATION	(INODE_ALLOCATION_SIZE/sizeof(struct inode))

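/*
 * For example, with 4 KB pages an order-1 allocation is 8 KB, so
 * each call to grow_inodes() below adds 8192 / sizeof(struct inode)
 * inodes to the pool on such a configuration.
 */
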
static struct inode * grow_inodes(void)
{
	struct inode * inode;

	/*
	 * Check whether to restock the unused list.
	 */
	if (inodes_stat.nr_inodes > max_inodes) {
		struct list_head *tmp;
		try_to_free_inodes(inodes_stat.nr_inodes >> 2);
		tmp = inode_unused.next;
		if (tmp != &inode_unused) {
			inodes_stat.nr_free_inodes--;
			list_del(tmp);
			inode = list_entry(tmp, struct inode, i_list);
			return inode;
		}
	}

	spin_unlock(&inode_lock);
	inode = (struct inode *)__get_free_pages(GFP_KERNEL,INODE_PAGE_ORDER);
	if (inode) {
		int size;
		struct inode * tmp;

		size = INODE_ALLOCATION_SIZE - 2*sizeof(struct inode);
		tmp = inode;
		spin_lock(&inode_lock);
		do {
			tmp++;
			init_once(tmp);
			list_add(&tmp->i_list, &inode_unused);
			size -= sizeof(struct inode);
		} while (size >= 0);
		init_once(inode);
		/*
		 * Update the inode statistics
		 */
		inodes_stat.nr_inodes += INODES_PER_ALLOCATION;
		inodes_stat.nr_free_inodes += INODES_PER_ALLOCATION - 1;
		return inode;
	}

	/*
	 * If the allocation failed, do an extensive pruning of
	 * the dcache and then try again to free some inodes.
	 */
	prune_dcache(inodes_stat.nr_inodes >> 2);

	spin_lock(&inode_lock);
	free_inodes();
	{
		struct list_head *tmp = inode_unused.next;
		if (tmp != &inode_unused) {
			inodes_stat.nr_free_inodes--;
			list_del(tmp);
			inode = list_entry(tmp, struct inode, i_list);
			return inode;
		}
	}
	spin_unlock(&inode_lock);

	printk("grow_inodes: allocation failed\n");
	return NULL;
}

/*
 * Called with the inode lock held.
 */
static struct inode * find_inode(struct super_block * sb, unsigned long ino, struct list_head *head)
{
	struct list_head *tmp;
	struct inode * inode;

	tmp = head;
	for (;;) {
		tmp = tmp->next;
		inode = NULL;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_hash);
		if (inode->i_sb != sb)
			continue;
		if (inode->i_ino != ino)
			continue;
		inode->i_count++;
		break;
	}
	return inode;
}

/*
 * This just initializes the inode fields
 * to known values before returning the inode..
 *
 * i_sb, i_ino, i_count, i_state and the lists have
 * been initialized elsewhere..
 */
void clean_inode(struct inode *inode)
{
	memset(&inode->u, 0, sizeof(inode->u));
	inode->i_sock = 0;
	inode->i_op = NULL;
	inode->i_nlink = 1;
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_generation = 0;
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
	sema_init(&inode->i_sem, 1);
	inode->i_pipe = NULL;
}

/*
 * This is called by things like the networking layer
 * etc that want to get an inode without any inode
 * number, or filesystems that allocate new inodes with
 * no pre-existing information.
 */
struct inode * get_empty_inode(void)
{
	static unsigned long last_ino = 0;
	struct inode * inode;
	struct list_head * tmp;

	spin_lock(&inode_lock);
	tmp = inode_unused.next;
	if (tmp != &inode_unused) {
		list_del(tmp);
		inodes_stat.nr_free_inodes--;
		inode = list_entry(tmp, struct inode, i_list);
	add_new_inode:
		list_add(&inode->i_list, &inode_in_use);
		inode->i_sb = NULL;
		inode->i_dev = 0;
		inode->i_ino = ++last_ino;
		inode->i_flags = 0;
		inode->i_count = 1;
		inode->i_state = 0;
		spin_unlock(&inode_lock);
		clean_inode(inode);
		return inode;
	}

	/*
	 * Warning: if this succeeded, we will now
	 * return with the inode lock.
	 */
	inode = grow_inodes();
	if (inode)
		goto add_new_inode;

	return inode;
}

/*
 * This is called with the inode lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *	-- rmk@arm.uk.linux.org
 */
static struct inode * get_new_inode(struct super_block *sb, unsigned long ino, struct list_head *head)
{
	struct inode * inode;
	struct list_head * tmp = inode_unused.next;

	if (tmp != &inode_unused) {
		list_del(tmp);
		inodes_stat.nr_free_inodes--;
		inode = list_entry(tmp, struct inode, i_list);
	add_new_inode:
		list_add(&inode->i_list, &inode_in_use);
		list_add(&inode->i_hash, head);
		inode->i_sb = sb;
		inode->i_dev = sb->s_dev;
		inode->i_ino = ino;
		inode->i_flags = 0;
		inode->i_count = 1;
		inode->i_state = I_LOCK;
		spin_unlock(&inode_lock);

		clean_inode(inode);
		sb->s_op->read_inode(inode);

		/*
		 * This is special! We do not need the spinlock
		 * when clearing I_LOCK, because we're guaranteed
		 * that nobody else tries to do anything about the
		 * state of the inode when it is locked, as we
		 * just created it (so there can be no old holders
		 * that haven't tested I_LOCK).
		 */
		inode->i_state &= ~I_LOCK;
		wake_up(&inode->i_wait);

		return inode;
	}

	/*
	 * We need to expand. Note that "grow_inodes()" will
	 * release the spinlock, but will return with the lock
	 * held again if the allocation succeeded.
	 */
	inode = grow_inodes();
	if (inode) {
		/* We released the lock, so.. */
		struct inode * old = find_inode(sb, ino, head);
		if (!old)
			goto add_new_inode;
		list_add(&inode->i_list, &inode_unused);
		inodes_stat.nr_free_inodes++;
		spin_unlock(&inode_lock);
		wait_on_inode(old);
		return old;
	}
	return inode;
}

static inline unsigned long hash(struct super_block *sb, unsigned long i_ino)
{
	unsigned long tmp = i_ino | (unsigned long) sb;
	tmp = tmp + (tmp >> HASH_BITS) + (tmp >> HASH_BITS*2);
	return tmp & HASH_MASK;
}

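/*
 * The hash folds the superblock pointer into the inode number, then
 * adds two right-shifted copies of the result so that bits above the
 * low HASH_BITS still influence the bucket index.
 */
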
/* Yeah, I know about quadratic hash. Maybe, later. */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	static ino_t counter = 0;
	struct inode *inode;
	struct list_head * head;
	ino_t res;
	spin_lock(&inode_lock);
retry:
	if (counter > max_reserved) {
		head = inode_hashtable + hash(sb,counter);
		inode = find_inode(sb, res = counter++, head);
		if (!inode) {
			spin_unlock(&inode_lock);
			return res;
		}
		inode->i_count--; /* compensate find_inode() */
	} else {
		counter = max_reserved + 1;
	}
	goto retry;
}

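/*
 * iunique() above hands back an inode number, greater than
 * max_reserved, that is not currently in use on this superblock:
 * it simply advances a static counter until find_inode() comes up
 * empty for the candidate.
 */
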
struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode_lock);
	if (inode->i_state & I_FREEING)
		inode = NULL;
	else
		inode->i_count++;
	spin_unlock(&inode_lock);
	if (inode)
		wait_on_inode(inode);
	return inode;
}

struct inode *iget(struct super_block *sb, unsigned long ino)
{
	struct list_head * head = inode_hashtable + hash(sb,ino);
	struct inode * inode;

	spin_lock(&inode_lock);
	inode = find_inode(sb, ino, head);
	if (inode) {
		spin_unlock(&inode_lock);
		wait_on_inode(inode);
		return inode;
	}

	/*
	 * get_new_inode() will do the right thing, releasing
	 * the inode lock and re-trying the search in case it
	 * had to block at any point.
	 */
	return get_new_inode(sb, ino, head);
}

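/*
 * Typical caller pattern (a sketch, not taken from this file):
 *
 *	struct inode *inode = iget(sb, ino);
 *	if (inode) {
 *		...
 *		iput(inode);
 *	}
 *
 * i.e. every successful iget() must eventually be balanced by an
 * iput().
 */
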
void insert_inode_hash(struct inode *inode)
{
	struct list_head *head = inode_hashtable + hash(inode->i_sb, inode->i_ino);
	spin_lock(&inode_lock);
	list_add(&inode->i_hash, head);
	spin_unlock(&inode_lock);
}

void remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_lock);
	list_del(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_hash);
	spin_unlock(&inode_lock);
}

void iput(struct inode *inode)
{
	if (inode) {
		struct super_operations *op = NULL;

		if (inode->i_sb && inode->i_sb->s_op)
			op = inode->i_sb->s_op;
		if (op && op->put_inode)
			op->put_inode(inode);

		spin_lock(&inode_lock);
		if (!--inode->i_count) {
			if (!inode->i_nlink) {
				list_del(&inode->i_hash);
				INIT_LIST_HEAD(&inode->i_hash);
				list_del(&inode->i_list);
				INIT_LIST_HEAD(&inode->i_list);
				inode->i_state|=I_FREEING;
				if (op && op->delete_inode) {
					void (*delete)(struct inode *) = op->delete_inode;
					spin_unlock(&inode_lock);
					if (inode->i_nrpages)
						truncate_inode_pages(inode, 0);
					delete(inode);
					spin_lock(&inode_lock);
				}
			}
			if (list_empty(&inode->i_hash)) {
				list_del(&inode->i_list);
				INIT_LIST_HEAD(&inode->i_list);
				inode->i_state|=I_FREEING;
				spin_unlock(&inode_lock);
				clear_inode(inode);
				spin_lock(&inode_lock);
				list_add(&inode->i_list, &inode_unused);
				inodes_stat.nr_free_inodes++;
			}
			else if (!(inode->i_state & I_DIRTY)) {
				list_del(&inode->i_list);
				list_add(&inode->i_list, &inode_in_use);
			}
#ifdef INODE_PARANOIA
			if (inode->i_flock)
				printk(KERN_ERR "iput: inode %s/%ld still has locks!\n",
					kdevname(inode->i_dev), inode->i_ino);
			if (!list_empty(&inode->i_dentry))
				printk(KERN_ERR "iput: device %s inode %ld still has aliases!\n",
					kdevname(inode->i_dev), inode->i_ino);
			if (inode->i_count)
				printk(KERN_ERR "iput: device %s inode %ld count changed, count=%d\n",
					kdevname(inode->i_dev), inode->i_ino, inode->i_count);
			if (atomic_read(&inode->i_sem.count) != 1)
				printk(KERN_ERR "iput: Aieee, semaphore in use inode %s/%ld, count=%d\n",
					kdevname(inode->i_dev), inode->i_ino, atomic_read(&inode->i_sem.count));
#endif
		}
		if (inode->i_count > (1<<31)) {
			printk(KERN_ERR "iput: inode %s/%ld count wrapped\n",
				kdevname(inode->i_dev), inode->i_ino);
		}
		spin_unlock(&inode_lock);
	}
}

int bmap(struct inode * inode, int block)
{
	struct buffer_head tmp;

	if (inode->i_op && inode->i_op->get_block) {
		tmp.b_state = 0;
		tmp.b_blocknr = 0;
		inode->i_op->get_block(inode, block, &tmp, 0);
		return tmp.b_blocknr;
	}
	return 0;
}

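/*
 * In other words, bmap() maps a logical block in the file to a
 * block number on the underlying device by calling the
 * filesystem's get_block() with the final "create" argument 0, so
 * nothing is allocated; 0 is returned for holes or when there is
 * no get_block method.
 */
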
/*
 * Initialize the hash tables and default
 * value for max inodes
 */
#define MAX_INODE (16384)

void __init inode_init(void)
{
	int i, max;
	struct list_head *head = inode_hashtable;

	i = HASH_SIZE;
	do {
		INIT_LIST_HEAD(head);
		head++;
		i--;
	} while (i);

	/* Initial guess at reasonable inode number */
	max = num_physpages >> 1;
	if (max > MAX_INODE)
		max = MAX_INODE;
	max_inodes = max;
}

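/*
 * The guess above allows roughly one inode per two pages of
 * physical memory.  For example, a machine with 64 MB of RAM and
 * 4 KB pages has num_physpages == 16384, so max_inodes becomes
 * 8192; anything with 128 MB or more is capped at MAX_INODE.
 */
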
void update_atime (struct inode *inode)
{
	if ( IS_NOATIME (inode) ) return;
	if ( IS_NODIRATIME (inode) && S_ISDIR (inode->i_mode) ) return;
	if ( IS_RDONLY (inode) ) return;
	inode->i_atime = CURRENT_TIME;
	mark_inode_dirty (inode);
}   /*  End Function update_atime  */