fs/inode.c (davej-history.git, import of 2.2.8pre2)
/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 */

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 *
 * Famous last words.
 */

#define INODE_PARANOIA 1
/* #define INODE_DEBUG 1 */

/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define HASH_BITS 8
#define HASH_SIZE (1UL << HASH_BITS)
#define HASH_MASK (HASH_SIZE-1)
/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, hashed if i_nlink > 0
 *  "dirty"  - valid inode, hashed if i_nlink > 0, dirty.
 *  "unused" - ready to be re-used. Not hashed.
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */

static LIST_HEAD(inode_in_use);
static LIST_HEAD(inode_unused);
static struct list_head inode_hashtable[HASH_SIZE];

/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;

/*
 * Statistics gathering..
 */
struct {
	int nr_inodes;
	int nr_free_inodes;
	int dummy[5];
} inodes_stat = {0, 0,};

int max_inodes;
/*
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but
 * move it onto the dirty list only if it is hashed.
 * If it was not hashed, it will never be added to
 * the dirty list even if it is later hashed, as it
 * will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_
 * you start marking them dirty..
 */
void __mark_inode_dirty(struct inode *inode)
{
	struct super_block * sb = inode->i_sb;

	if (sb) {
		spin_lock(&inode_lock);
		if (!(inode->i_state & I_DIRTY)) {
			inode->i_state |= I_DIRTY;
			/* Only add valid (ie hashed) inodes to the dirty list */
			if (!list_empty(&inode->i_hash)) {
				list_del(&inode->i_list);
				list_add(&inode->i_list, &sb->s_dirty);
			}
		}
		spin_unlock(&inode_lock);
	}
}
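/*
 * Usage sketch (hypothetical caller): a filesystem that has just hashed
 * a freshly created inode and then modifies it would do
 *
 *	insert_inode_hash(inode);
 *	inode->i_size = new_size;
 *	mark_inode_dirty(inode);
 *
 * so the inode lands on its super block's s_dirty list and is written
 * back later by sync_inodes().
 */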
static void __wait_on_inode(struct inode * inode)
{
	struct wait_queue wait = { current, NULL };

	add_wait_queue(&inode->i_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (inode->i_state & I_LOCK) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&inode->i_wait, &wait);
	current->state = TASK_RUNNING;
}

static inline void wait_on_inode(struct inode *inode)
{
	if (inode->i_state & I_LOCK)
		__wait_on_inode(inode);
}
/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode..
 */
static inline void init_once(struct inode * inode)
{
	memset(inode, 0, sizeof(*inode));
	init_waitqueue(&inode->i_wait);
	INIT_LIST_HEAD(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	sema_init(&inode->i_sem, 1);
	sema_init(&inode->i_atomic_write, 1);
}
static inline void write_inode(struct inode *inode)
{
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->write_inode)
		inode->i_sb->s_op->write_inode(inode);
}
static inline void sync_one(struct inode *inode)
{
	if (inode->i_state & I_LOCK) {
		spin_unlock(&inode_lock);
		__wait_on_inode(inode);
		spin_lock(&inode_lock);
	} else {
		list_del(&inode->i_list);
		list_add(&inode->i_list, &inode_in_use);
		/* Set I_LOCK, reset I_DIRTY */
		inode->i_state ^= I_DIRTY | I_LOCK;
		spin_unlock(&inode_lock);

		write_inode(inode);

		spin_lock(&inode_lock);
		inode->i_state &= ~I_LOCK;
		wake_up(&inode->i_wait);
	}
}
static inline void sync_list(struct list_head *head)
{
	struct list_head * tmp;

	while ((tmp = head->prev) != head)
		sync_one(list_entry(tmp, struct inode, i_list));
}
172 * "sync_inodes()" goes through the super block's dirty list,
173 * writes them out, and puts them back on the normal list.
175 void sync_inodes(kdev_t dev)
177 struct super_block * sb = sb_entry(super_blocks.next);
180 * Search the super_blocks array for the device(s) to sync.
182 spin_lock(&inode_lock);
183 for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
184 if (!sb->s_dev)
185 continue;
186 if (dev && sb->s_dev != dev)
187 continue;
189 sync_list(&sb->s_dirty);
191 if (dev)
192 break;
194 spin_unlock(&inode_lock);
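/*
 * Usage sketch (hypothetical callers): the sync(2) path flushes every
 * device with
 *
 *	sync_inodes(0);
 *
 * while code that only cares about one block device can pass it
 * explicitly:
 *
 *	sync_inodes(inode->i_dev);
 */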
/*
 * Called with the spinlock already held..
 */
static void sync_all_inodes(void)
{
	struct super_block * sb = sb_entry(super_blocks.next);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
		if (!sb->s_dev)
			continue;
		sync_list(&sb->s_dirty);
	}
}
/*
 * Needed by knfsd
 */
void write_inode_now(struct inode *inode)
{
	struct super_block * sb = inode->i_sb;

	if (sb) {
		spin_lock(&inode_lock);
		while (inode->i_state & I_DIRTY)
			sync_one(inode);
		spin_unlock(&inode_lock);
	}
	else
		printk("write_inode_now: no super block\n");
}
/*
 * This is called by the filesystem to tell us
 * that the inode is no longer useful. We just
 * terminate it with extreme prejudice.
 */
void clear_inode(struct inode *inode)
{
	if (inode->i_nrpages)
		truncate_inode_pages(inode, 0);
	wait_on_inode(inode);
	if (IS_QUOTAINIT(inode))
		DQUOT_DROP(inode);
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->clear_inode)
		inode->i_sb->s_op->clear_inode(inode);

	inode->i_state = 0;
}
/*
 * Dispose-list gets a local list, so it doesn't need to
 * worry about list corruption. It releases the inode lock
 * while clearing the inodes.
 */
static void dispose_list(struct list_head * head)
{
	struct list_head *next;
	int count = 0;

	spin_unlock(&inode_lock);
	next = head->next;
	for (;;) {
		struct list_head * tmp = next;
		struct inode * inode;

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_list);
		clear_inode(inode);
		count++;
	}

	/* Add them all to the unused list in one fell swoop */
	spin_lock(&inode_lock);
	list_splice(head, &inode_unused);
	inodes_stat.nr_free_inodes += count;
}
/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose)
{
	struct list_head *next;
	int busy = 0;

	next = head->next;
	for (;;) {
		struct list_head * tmp = next;
		struct inode * inode;

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_list);
		if (inode->i_sb != sb)
			continue;
		if (!inode->i_count) {
			list_del(&inode->i_hash);
			INIT_LIST_HEAD(&inode->i_hash);
			list_del(&inode->i_list);
			list_add(&inode->i_list, dispose);
			continue;
		}
		busy = 1;
	}
	return busy;
}
/*
 * This is a two-stage process. First we collect all
 * offending inodes onto the throw-away list, and in
 * the second stage we actually dispose of them. This
 * is because we don't want to sleep while messing
 * with the global lists..
 */
int invalidate_inodes(struct super_block * sb)
{
	int busy;
	LIST_HEAD(throw_away);

	spin_lock(&inode_lock);
	busy = invalidate_list(&inode_in_use, sb, &throw_away);
	busy |= invalidate_list(&sb->s_dirty, sb, &throw_away);
	dispose_list(&throw_away);
	spin_unlock(&inode_lock);

	return busy;
}
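/*
 * Usage sketch (hypothetical caller): unmount-style code checks the
 * return value to learn whether any inode of the filesystem is still
 * referenced:
 *
 *	if (invalidate_inodes(sb))
 *		printk("VFS: some inodes are still busy\n");
 *
 * A non-zero return means the super block cannot be discarded yet.
 */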
/*
 * This is called with the inode lock held. It searches
 * the in-use list for freeable inodes, which are moved to a
 * temporary list and then placed on the unused list by
 * dispose_list.
 *
 * We don't expect to have to call this very often.
 *
 * N.B. The spinlock is released during the call to
 *      dispose_list.
 */
#define CAN_UNUSE(inode) \
	(((inode)->i_count | (inode)->i_state) == 0)
#define INODE(entry) (list_entry(entry, struct inode, i_list))

static int free_inodes(void)
{
	struct list_head list, *entry, *freeable = &list;
	int found = 0;

	INIT_LIST_HEAD(freeable);
	entry = inode_in_use.next;
	while (entry != &inode_in_use) {
		struct list_head *tmp = entry;

		entry = entry->next;
		if (!CAN_UNUSE(INODE(tmp)))
			continue;
		list_del(tmp);
		list_del(&INODE(tmp)->i_hash);
		INIT_LIST_HEAD(&INODE(tmp)->i_hash);
		list_add(tmp, freeable);
		found = 1;
	}

	if (found)
		dispose_list(freeable);

	return found;
}
/*
 * Searches the inodes list for freeable inodes,
 * shrinking the dcache before (and possibly after,
 * if we're low).
 */
static void try_to_free_inodes(int goal)
{
	/*
	 * First try to just get rid of unused inodes.
	 *
	 * If we can't reach our goal that way, we'll have
	 * to try to shrink the dcache and sync existing
	 * inodes..
	 */
	free_inodes();
	goal -= inodes_stat.nr_free_inodes;
	if (goal > 0) {
		spin_unlock(&inode_lock);
		select_dcache(goal, 0);
		prune_dcache(goal);
		spin_lock(&inode_lock);
		sync_all_inodes();
		free_inodes();
	}
}
/*
 * This is the externally visible routine for
 * inode memory management.
 */
void free_inode_memory(int goal)
{
	spin_lock(&inode_lock);
	free_inodes();
	spin_unlock(&inode_lock);
}
/*
 * This is called with the spinlock held, but releases
 * the lock when freeing or allocating inodes.
 * Look out! This returns with the inode lock held if
 * it got an inode..
 *
 * We do inode allocations two pages at a time to reduce
 * fragmentation.
 */
#define INODE_PAGE_ORDER	1
#define INODE_ALLOCATION_SIZE	(PAGE_SIZE << INODE_PAGE_ORDER)
#define INODES_PER_ALLOCATION	(INODE_ALLOCATION_SIZE/sizeof(struct inode))

static struct inode * grow_inodes(void)
{
	struct inode * inode;

	/*
	 * Check whether to restock the unused list.
	 */
	if (inodes_stat.nr_inodes > max_inodes) {
		struct list_head *tmp;
		try_to_free_inodes(inodes_stat.nr_inodes >> 2);
		tmp = inode_unused.next;
		if (tmp != &inode_unused) {
			inodes_stat.nr_free_inodes--;
			list_del(tmp);
			inode = list_entry(tmp, struct inode, i_list);
			return inode;
		}
	}

	spin_unlock(&inode_lock);
	inode = (struct inode *)__get_free_pages(GFP_KERNEL,INODE_PAGE_ORDER);
	if (inode) {
		int size;
		struct inode * tmp;

		size = INODE_ALLOCATION_SIZE - 2*sizeof(struct inode);
		tmp = inode;
		spin_lock(&inode_lock);
		do {
			tmp++;
			init_once(tmp);
			list_add(&tmp->i_list, &inode_unused);
			size -= sizeof(struct inode);
		} while (size >= 0);
		init_once(inode);
		/*
		 * Update the inode statistics
		 */
		inodes_stat.nr_inodes += INODES_PER_ALLOCATION;
		inodes_stat.nr_free_inodes += INODES_PER_ALLOCATION - 1;
		return inode;
	}

	/*
	 * If the allocation failed, do an extensive pruning of
	 * the dcache and then try again to free some inodes.
	 */
	prune_dcache(inodes_stat.nr_inodes >> 2);

	spin_lock(&inode_lock);
	free_inodes();
	{
		struct list_head *tmp = inode_unused.next;
		if (tmp != &inode_unused) {
			inodes_stat.nr_free_inodes--;
			list_del(tmp);
			inode = list_entry(tmp, struct inode, i_list);
			return inode;
		}
	}
	spin_unlock(&inode_lock);

	printk("grow_inodes: allocation failed\n");
	return NULL;
}
/*
 * Called with the inode lock held.
 */
static struct inode * find_inode(struct super_block * sb, unsigned long ino, struct list_head *head)
{
	struct list_head *tmp;
	struct inode * inode;

	tmp = head;
	for (;;) {
		tmp = tmp->next;
		inode = NULL;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_hash);
		if (inode->i_sb != sb)
			continue;
		if (inode->i_ino != ino)
			continue;
		inode->i_count++;
		break;
	}
	return inode;
}
/*
 * This just initializes the inode fields
 * to known values before returning the inode..
 *
 * i_sb, i_ino, i_count, i_state and the lists have
 * been initialized elsewhere..
 */
void clean_inode(struct inode *inode)
{
	memset(&inode->u, 0, sizeof(inode->u));
	inode->i_sock = 0;
	inode->i_op = NULL;
	inode->i_nlink = 1;
	inode->i_writecount = 0;
	inode->i_size = 0;
	inode->i_generation = 0;
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
	sema_init(&inode->i_sem, 1);
}
/*
 * This is called by things like the networking layer
 * etc that want to get an inode without any inode
 * number, or filesystems that allocate new inodes with
 * no pre-existing information.
 */
struct inode * get_empty_inode(void)
{
	static unsigned long last_ino = 0;
	struct inode * inode;
	struct list_head * tmp;

	spin_lock(&inode_lock);
	tmp = inode_unused.next;
	if (tmp != &inode_unused) {
		list_del(tmp);
		inodes_stat.nr_free_inodes--;
		inode = list_entry(tmp, struct inode, i_list);
add_new_inode:
		list_add(&inode->i_list, &inode_in_use);
		inode->i_sb = NULL;
		inode->i_dev = 0;
		inode->i_ino = ++last_ino;
		inode->i_flags = 0;
		inode->i_count = 1;
		inode->i_state = 0;
		spin_unlock(&inode_lock);
		clean_inode(inode);
		return inode;
	}

	/*
	 * Warning: if this succeeded, we will now
	 * return with the inode lock.
	 */
	inode = grow_inodes();
	if (inode)
		goto add_new_inode;

	return inode;
}
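/*
 * Usage sketch (hypothetical caller): anonymous users such as the socket
 * layer take an inode with no backing filesystem and fill it in by hand:
 *
 *	struct inode *inode = get_empty_inode();
 *	if (!inode)
 *		return NULL;
 *	inode->i_mode = S_IFSOCK | S_IRWXUGO;
 *	inode->i_uid = current->fsuid;
 *	inode->i_gid = current->fsgid;
 *
 * The reference is dropped again with a plain iput(inode).
 */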
/*
 * This is called with the inode lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *	-- rmk@arm.uk.linux.org
 */
static struct inode * get_new_inode(struct super_block *sb, unsigned long ino, struct list_head *head)
{
	struct inode * inode;
	struct list_head * tmp = inode_unused.next;

	if (tmp != &inode_unused) {
		list_del(tmp);
		inodes_stat.nr_free_inodes--;
		inode = list_entry(tmp, struct inode, i_list);
add_new_inode:
		list_add(&inode->i_list, &inode_in_use);
		list_add(&inode->i_hash, head);
		inode->i_sb = sb;
		inode->i_dev = sb->s_dev;
		inode->i_ino = ino;
		inode->i_flags = 0;
		inode->i_count = 1;
		inode->i_state = I_LOCK;
		spin_unlock(&inode_lock);

		clean_inode(inode);
		sb->s_op->read_inode(inode);

		/*
		 * This is special! We do not need the spinlock
		 * when clearing I_LOCK, because we're guaranteed
		 * that nobody else tries to do anything about the
		 * state of the inode when it is locked, as we
		 * just created it (so there can be no old holders
		 * that haven't tested I_LOCK).
		 */
		inode->i_state &= ~I_LOCK;
		wake_up(&inode->i_wait);

		return inode;
	}

	/*
	 * We need to expand. Note that "grow_inodes()" will
	 * release the spinlock, but will return with the lock
	 * held again if the allocation succeeded.
	 */
	inode = grow_inodes();
	if (inode) {
		/* We released the lock, so.. */
		struct inode * old = find_inode(sb, ino, head);
		if (!old)
			goto add_new_inode;
		list_add(&inode->i_list, &inode_unused);
		inodes_stat.nr_free_inodes++;
		spin_unlock(&inode_lock);
		wait_on_inode(old);
		return old;
	}
	return inode;
}
static inline unsigned long hash(struct super_block *sb, unsigned long i_ino)
{
	unsigned long tmp = i_ino | (unsigned long) sb;
	tmp = tmp + (tmp >> HASH_BITS) + (tmp >> HASH_BITS*2);
	return tmp & HASH_MASK;
}
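/*
 * Worked example (illustrative numbers): with HASH_BITS == 8, an inode
 * number of 0x3421 on a super block at 0xc1a2b000 hashes as
 *
 *	tmp    = 0x3421 | 0xc1a2b000 = 0xc1a2b421
 *	tmp    = 0xc1a2b421 + (0xc1a2b421 >> 8) + (0xc1a2b421 >> 16)
 *	       = 0xc1a2b421 + 0x00c1a2b4 + 0x0000c1a2 = 0xc2651877
 *	bucket = 0xc2651877 & HASH_MASK = 0x77
 *
 * i.e. the super block pointer and inode number are folded together and
 * the low HASH_BITS bits pick one of the HASH_SIZE chains.
 */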
struct inode *iget(struct super_block *sb, unsigned long ino)
{
	struct list_head * head = inode_hashtable + hash(sb,ino);
	struct inode * inode;

	spin_lock(&inode_lock);
	inode = find_inode(sb, ino, head);
	if (inode) {
		spin_unlock(&inode_lock);
		wait_on_inode(inode);
		return inode;
	}
	/*
	 * get_new_inode() will do the right thing, releasing
	 * the inode lock and re-trying the search in case it
	 * had to block at any point.
	 */
	return get_new_inode(sb, ino, head);
}
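/*
 * Usage sketch (hypothetical filesystem lookup): directory code that has
 * resolved a name to an on-disk inode number turns it into an in-core
 * inode with
 *
 *	inode = iget(dir->i_sb, ino);
 *	if (!inode)
 *		return -EACCES;
 *	d_add(dentry, inode);
 *
 * The reference taken here is dropped later through dput()/iput().
 */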
void insert_inode_hash(struct inode *inode)
{
	struct list_head *head = inode_hashtable + hash(inode->i_sb, inode->i_ino);
	spin_lock(&inode_lock);
	list_add(&inode->i_hash, head);
	spin_unlock(&inode_lock);
}
void remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_lock);
	list_del(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_hash);
	spin_unlock(&inode_lock);
}
void iput(struct inode *inode)
{
	if (inode) {
		struct super_operations *op = NULL;

		if (inode->i_sb && inode->i_sb->s_op)
			op = inode->i_sb->s_op;
		if (op && op->put_inode)
			op->put_inode(inode);

		spin_lock(&inode_lock);
		if (!--inode->i_count) {
			if (!inode->i_nlink) {
				list_del(&inode->i_hash);
				INIT_LIST_HEAD(&inode->i_hash);
				list_del(&inode->i_list);
				INIT_LIST_HEAD(&inode->i_list);
				if (op && op->delete_inode) {
					void (*delete)(struct inode *) = op->delete_inode;
					spin_unlock(&inode_lock);
					delete(inode);
					spin_lock(&inode_lock);
				}
			}
			if (list_empty(&inode->i_hash)) {
				list_del(&inode->i_list);
				INIT_LIST_HEAD(&inode->i_list);
				spin_unlock(&inode_lock);
				clear_inode(inode);
				spin_lock(&inode_lock);
				list_add(&inode->i_list, &inode_unused);
				inodes_stat.nr_free_inodes++;
			}
			else if (!(inode->i_state & I_DIRTY)) {
				list_del(&inode->i_list);
				list_add(&inode->i_list, &inode_in_use);
			}
#ifdef INODE_PARANOIA
			if (inode->i_flock)
				printk(KERN_ERR "iput: inode %s/%ld still has locks!\n",
					kdevname(inode->i_dev), inode->i_ino);
			if (!list_empty(&inode->i_dentry))
				printk(KERN_ERR "iput: device %s inode %ld still has aliases!\n",
					kdevname(inode->i_dev), inode->i_ino);
			if (inode->i_count)
				printk(KERN_ERR "iput: device %s inode %ld count changed, count=%d\n",
					kdevname(inode->i_dev), inode->i_ino, inode->i_count);
			if (atomic_read(&inode->i_sem.count) != 1)
				printk(KERN_ERR "iput: Aieee, semaphore in use inode %s/%ld, count=%d\n",
					kdevname(inode->i_dev), inode->i_ino, atomic_read(&inode->i_sem.count));
			if (atomic_read(&inode->i_atomic_write.count) != 1)
				printk(KERN_ERR "iput: Aieee, atomic write semaphore in use inode %s/%ld, count=%d\n",
					kdevname(inode->i_dev), inode->i_ino, atomic_read(&inode->i_atomic_write.count));
#endif
		}
		if (inode->i_count > (1<<31)) {
			printk(KERN_ERR "iput: inode %s/%ld count wrapped\n",
				kdevname(inode->i_dev), inode->i_ino);
		}
		spin_unlock(&inode_lock);
	}
}
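/*
 * Reference counting sketch (illustrative): every successful iget() or
 * get_empty_inode() is balanced by exactly one iput():
 *
 *	struct inode *inode = iget(sb, ino);
 *	if (inode) {
 *		... use the inode ...
 *		iput(inode);
 *	}
 *
 * The final iput() either leaves a still-hashed inode in place for a
 * later iget() to find, or, when i_nlink has dropped to zero, unhashes
 * it, lets the filesystem's delete_inode operation reclaim it, and then
 * returns it to the unused list.
 */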
int bmap(struct inode * inode, int block)
{
	if (inode->i_op && inode->i_op->bmap)
		return inode->i_op->bmap(inode, block);
	return 0;
}
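/*
 * Usage sketch (hypothetical caller): FIBMAP-style code maps a logical
 * file block to a physical device block, treating 0 as "no mapping":
 *
 *	int phys = bmap(inode, logical_block);
 *	if (!phys)
 *		return -EINVAL;
 */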
/*
 * Initialize the hash tables and default
 * value for max inodes
 */
#define MAX_INODE (16384)

void __init inode_init(void)
{
	int i, max;
	struct list_head *head = inode_hashtable;

	i = HASH_SIZE;
	do {
		INIT_LIST_HEAD(head);
		head++;
		i--;
	} while (i);

	/* Initial guess at reasonable inode number */
	max = num_physpages >> 1;
	if (max > MAX_INODE)
		max = MAX_INODE;
	max_inodes = max;
}
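/*
 * Sizing example (illustrative): with 4kB pages a 64MB machine has
 * num_physpages == 16384, so max = 16384 >> 1 = 8192 inodes; from
 * 128MB upwards the MAX_INODE cap of 16384 takes over.
 */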
/* This belongs in file_table.c, not here... */
int fs_may_remount_ro(struct super_block *sb)
{
	struct file *file;

	/* Check that no files are currently opened for writing. */
	for (file = inuse_filps; file; file = file->f_next) {
		struct inode *inode;
		if (!file->f_dentry)
			continue;
		inode = file->f_dentry->d_inode;
		if (!inode || inode->i_sb != sb)
			continue;

		/* File with pending delete? */
		if (inode->i_nlink == 0)
			return 0;

		/* Writable file? */
		if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
			return 0;
	}
	return 1; /* Tis' cool bro. */
}
void update_atime (struct inode *inode)
{
	if ( IS_NOATIME (inode) ) return;
	if ( IS_NODIRATIME (inode) && S_ISDIR (inode->i_mode) ) return;
	if ( IS_RDONLY (inode) ) return;
	inode->i_atime = CURRENT_TIME;
	mark_inode_dirty (inode);
}	/*  End Function update_atime  */