/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 */

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>

/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 *
 * Famous last words.
 */

#define INODE_PARANOIA 1
/* #define INODE_DEBUG 1 */

/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define HASH_BITS       8
#define HASH_SIZE       (1UL << HASH_BITS)
#define HASH_MASK       (HASH_SIZE-1)

/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, hashed if i_nlink > 0
 *  "dirty"  - valid inode, hashed if i_nlink > 0, dirty.
 *  "unused" - ready to be re-used. Not hashed.
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */

static LIST_HEAD(inode_in_use);
static LIST_HEAD(inode_unused);
static struct list_head inode_hashtable[HASH_SIZE];

/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;

/*
 * Statistics gathering..
 */
struct {
        int nr_inodes;
        int nr_free_inodes;
        int dummy[5];
} inodes_stat = {0, 0,};

int max_inodes;

/*
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but
 * move it onto the dirty list only if it is hashed.
 * If it was not hashed, it will never be added to
 * the dirty list even if it is later hashed, as it
 * will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_
 * you start marking them dirty..
 */
void __mark_inode_dirty(struct inode *inode)
{
        struct super_block * sb = inode->i_sb;

        if (sb) {
                spin_lock(&inode_lock);
                if (!(inode->i_state & I_DIRTY)) {
                        inode->i_state |= I_DIRTY;
                        /* Only add valid (ie hashed) inodes to the dirty list */
                        if (!list_empty(&inode->i_hash)) {
                                list_del(&inode->i_list);
                                list_add(&inode->i_list, &sb->s_dirty);
                        }
                }
                spin_unlock(&inode_lock);
        }
}

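/*
 * (Filesystems normally go through the mark_inode_dirty()
 * wrapper in <linux/fs.h> rather than calling this directly;
 * see update_atime() at the bottom of this file for a typical
 * caller.)
 */
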
static void __wait_on_inode(struct inode * inode)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue(&inode->i_wait, &wait);
repeat:
        current->state = TASK_UNINTERRUPTIBLE;
        if (inode->i_state & I_LOCK) {
                schedule();
                goto repeat;
        }
        remove_wait_queue(&inode->i_wait, &wait);
        current->state = TASK_RUNNING;
}

static inline void wait_on_inode(struct inode *inode)
{
        if (inode->i_state & I_LOCK)
                __wait_on_inode(inode);
}

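/*
 * (The queue-then-set-state-then-retest ordering in
 * __wait_on_inode() is the standard lost-wakeup-free sleep
 * pattern: the task is already on i_wait when it re-checks
 * I_LOCK, so a wake_up() racing with the check simply leaves
 * it runnable and schedule() returns at once.)
 */
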
/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode..
 */
static inline void init_once(struct inode * inode)
{
        memset(inode, 0, sizeof(*inode));
        init_waitqueue_head(&inode->i_wait);
        INIT_LIST_HEAD(&inode->i_hash);
        INIT_LIST_HEAD(&inode->i_dentry);
        sema_init(&inode->i_sem, 1);
        sema_init(&inode->i_atomic_write, 1);
}

static inline void write_inode(struct inode *inode)
{
        if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->write_inode)
                inode->i_sb->s_op->write_inode(inode);
}

static inline void sync_one(struct inode *inode)
{
        if (inode->i_state & I_LOCK) {
                spin_unlock(&inode_lock);
                __wait_on_inode(inode);
                spin_lock(&inode_lock);
        } else {
                list_del(&inode->i_list);
                list_add(&inode->i_list, &inode_in_use);
                /* Set I_LOCK, reset I_DIRTY */
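                /*
                 * (Both bits are known here: I_DIRTY is set - the
                 * inode came off a dirty list - and I_LOCK is clear,
                 * so one XOR flips both at once.)
                 */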
                inode->i_state ^= I_DIRTY | I_LOCK;
                spin_unlock(&inode_lock);

                write_inode(inode);

                spin_lock(&inode_lock);
                inode->i_state &= ~I_LOCK;
                wake_up(&inode->i_wait);
        }
}

static inline void sync_list(struct list_head *head)
{
        struct list_head * tmp;

        while ((tmp = head->prev) != head)
                sync_one(list_entry(tmp, struct inode, i_list));
}

/*
 * "sync_inodes()" goes through each super block's dirty list,
 * writes the inodes out, and puts them back on the normal list.
 */
void sync_inodes(kdev_t dev)
{
        struct super_block * sb = sb_entry(super_blocks.next);

        /*
         * Search the super_blocks list for the device(s) to sync.
         */
        spin_lock(&inode_lock);
        for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
                if (!sb->s_dev)
                        continue;
                if (dev && sb->s_dev != dev)
                        continue;

                sync_list(&sb->s_dirty);

                if (dev)
                        break;
        }
        spin_unlock(&inode_lock);
}

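/*
 * (A zero dev means "sync everything": neither dev check above
 * then skips or breaks, so every superblock's dirty list gets
 * swept.)
 */
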
/*
 * Called with the spinlock already held..
 */
static void sync_all_inodes(void)
{
        struct super_block * sb = sb_entry(super_blocks.next);
        for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
                if (!sb->s_dev)
                        continue;
                sync_list(&sb->s_dirty);
        }
}

/*
 * Needed by knfsd
 */
void write_inode_now(struct inode *inode)
{
        struct super_block * sb = inode->i_sb;

        if (sb) {
                spin_lock(&inode_lock);
                while (inode->i_state & I_DIRTY)
                        sync_one(inode);
                spin_unlock(&inode_lock);
        }
        else
                printk("write_inode_now: no super block\n");
}

/*
 * This is called by the filesystem to tell us
 * that the inode is no longer useful. We just
 * terminate it with extreme prejudice.
 */
void clear_inode(struct inode *inode)
{
        if (inode->i_nrpages)
                truncate_inode_pages(inode, 0);
        wait_on_inode(inode);
        if (IS_QUOTAINIT(inode))
                DQUOT_DROP(inode);
        if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->clear_inode)
                inode->i_sb->s_op->clear_inode(inode);

        inode->i_state = 0;
}

/*
 * Dispose-list gets a local list, so it doesn't need to
 * worry about list corruption. It releases the inode lock
 * while clearing the inodes.
 */
static void dispose_list(struct list_head * head)
{
        struct list_head *next;
        int count = 0;

        spin_unlock(&inode_lock);
        next = head->next;
        for (;;) {
                struct list_head * tmp = next;
                struct inode * inode;

                next = next->next;
                if (tmp == head)
                        break;
                inode = list_entry(tmp, struct inode, i_list);
                clear_inode(inode);
                count++;
        }

        /* Add them all to the unused list in one fell swoop */
        spin_lock(&inode_lock);
        list_splice(head, &inode_unused);
        inodes_stat.nr_free_inodes += count;
}

/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose)
{
        struct list_head *next;
        int busy = 0;

        next = head->next;
        for (;;) {
                struct list_head * tmp = next;
                struct inode * inode;

                next = next->next;
                if (tmp == head)
                        break;
                inode = list_entry(tmp, struct inode, i_list);
                if (inode->i_sb != sb)
                        continue;
                if (!inode->i_count) {
                        list_del(&inode->i_hash);
                        INIT_LIST_HEAD(&inode->i_hash);
                        list_del(&inode->i_list);
                        list_add(&inode->i_list, dispose);
                        inode->i_state |= I_FREEING;
                        continue;
                }
                busy = 1;
        }
        return busy;
}

/*
 * This is a two-stage process. First we collect all
 * offending inodes onto the throw-away list, and in
 * the second stage we actually dispose of them. This
 * is because we don't want to sleep while messing
 * with the global lists..
 */
int invalidate_inodes(struct super_block * sb)
{
        int busy;
        LIST_HEAD(throw_away);

        spin_lock(&inode_lock);
        busy = invalidate_list(&inode_in_use, sb, &throw_away);
        busy |= invalidate_list(&sb->s_dirty, sb, &throw_away);
        dispose_list(&throw_away);
        spin_unlock(&inode_lock);

        return busy;
}

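/*
 * (Typical caller: the umount path, which treats a nonzero
 * return as "filesystem still busy".)
 */
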
/*
 * This is called with the inode lock held. It searches
 * the in-use list for freeable inodes, which are moved to a
 * temporary list and then placed on the unused list by
 * dispose_list.
 *
 * We don't expect to have to call this very often.
 *
 * N.B. The spinlock is released during the call to
 *      dispose_list.
 */
#define CAN_UNUSE(inode) \
        (((inode)->i_count | (inode)->i_state) == 0)
#define INODE(entry)    (list_entry(entry, struct inode, i_list))

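/*
 * (CAN_UNUSE folds both tests into a single compare: an inode
 * with i_count != 0, or with any i_state bit set - dirty, locked
 * or freeing - is rejected.)
 */
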
static int free_inodes(void)
{
        struct list_head list, *entry, *freeable = &list;
        int found = 0;

        INIT_LIST_HEAD(freeable);
        entry = inode_in_use.next;
        while (entry != &inode_in_use) {
                struct list_head *tmp = entry;

                entry = entry->next;
                if (!CAN_UNUSE(INODE(tmp)))
                        continue;
                list_del(tmp);
                list_del(&INODE(tmp)->i_hash);
                INIT_LIST_HEAD(&INODE(tmp)->i_hash);
                list_add(tmp, freeable);
                list_entry(tmp, struct inode, i_list)->i_state = I_FREEING;
                found = 1;
        }

        if (found)
                dispose_list(freeable);

        return found;
}

/*
 * Searches the inodes list for freeable inodes,
 * shrinking the dcache before (and possibly after,
 * if we're low)
 */
static void try_to_free_inodes(int goal)
{
        /*
         * First try to just get rid of unused inodes.
         *
         * If we can't reach our goal that way, we'll have
         * to try to shrink the dcache and sync existing
         * inodes..
         */
        free_inodes();
        goal -= inodes_stat.nr_free_inodes;
        if (goal > 0) {
                spin_unlock(&inode_lock);
                select_dcache(goal, 0);
                prune_dcache(goal);
                spin_lock(&inode_lock);
                sync_all_inodes();
                free_inodes();
        }
}

/*
 * This is the externally visible routine for
 * inode memory management.
 */
void free_inode_memory(int goal)
{
        spin_lock(&inode_lock);
        free_inodes();
        spin_unlock(&inode_lock);
}

/*
 * This is called with the spinlock held, but releases
 * the lock when freeing or allocating inodes.
 * Look out! This returns with the inode lock held if
 * it got an inode..
 *
 * We do inode allocations two pages at a time to reduce
 * fragmentation.
 */
#define INODE_PAGE_ORDER        1
#define INODE_ALLOCATION_SIZE   (PAGE_SIZE << INODE_PAGE_ORDER)
#define INODES_PER_ALLOCATION   (INODE_ALLOCATION_SIZE/sizeof(struct inode))

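/*
 * (Worked example, assuming 4 KiB pages: each allocation is
 * 4 KiB << 1 = 8 KiB, so if struct inode were, say, 400 bytes
 * that would yield 20 inodes per allocation.  grow_inodes()
 * below strings INODES_PER_ALLOCATION - 1 of them onto the
 * unused list and hands the first one straight back.)
 */
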
static struct inode * grow_inodes(void)
{
        struct inode * inode;

        /*
         * Check whether to restock the unused list.
         */
        if (inodes_stat.nr_inodes > max_inodes) {
                struct list_head *tmp;
                try_to_free_inodes(inodes_stat.nr_inodes >> 2);
                tmp = inode_unused.next;
                if (tmp != &inode_unused) {
                        inodes_stat.nr_free_inodes--;
                        list_del(tmp);
                        inode = list_entry(tmp, struct inode, i_list);
                        return inode;
                }
        }

        spin_unlock(&inode_lock);
        inode = (struct inode *)__get_free_pages(GFP_KERNEL,INODE_PAGE_ORDER);
        if (inode) {
                int size;
                struct inode * tmp;

                size = INODE_ALLOCATION_SIZE - 2*sizeof(struct inode);
                tmp = inode;
                spin_lock(&inode_lock);
                do {
                        tmp++;
                        init_once(tmp);
                        list_add(&tmp->i_list, &inode_unused);
                        size -= sizeof(struct inode);
                } while (size >= 0);
                init_once(inode);
                /*
                 * Update the inode statistics
                 */
                inodes_stat.nr_inodes += INODES_PER_ALLOCATION;
                inodes_stat.nr_free_inodes += INODES_PER_ALLOCATION - 1;
                return inode;
        }

        /*
         * If the allocation failed, do an extensive pruning of
         * the dcache and then try again to free some inodes.
         */
        prune_dcache(inodes_stat.nr_inodes >> 2);

        spin_lock(&inode_lock);
        free_inodes();
        {
                struct list_head *tmp = inode_unused.next;
                if (tmp != &inode_unused) {
                        inodes_stat.nr_free_inodes--;
                        list_del(tmp);
                        inode = list_entry(tmp, struct inode, i_list);
                        return inode;
                }
        }
        spin_unlock(&inode_lock);

        printk("grow_inodes: allocation failed\n");
        return NULL;
}

/*
 * Called with the inode lock held.
 */
static struct inode * find_inode(struct super_block * sb, unsigned long ino, struct list_head *head)
{
        struct list_head *tmp;
        struct inode * inode;

        tmp = head;
        for (;;) {
                tmp = tmp->next;
                inode = NULL;
                if (tmp == head)
                        break;
                inode = list_entry(tmp, struct inode, i_hash);
                if (inode->i_sb != sb)
                        continue;
                if (inode->i_ino != ino)
                        continue;
                inode->i_count++;
                break;
        }
        return inode;
}

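/*
 * (Note the side effect: a hit bumps i_count, so callers that
 * merely probe - as iunique() does - must drop that reference
 * themselves.)
 */
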
/*
 * This just initializes the inode fields
 * to known values before returning the inode..
 *
 * i_sb, i_ino, i_count, i_state and the lists have
 * been initialized elsewhere..
 */
void clean_inode(struct inode *inode)
{
        memset(&inode->u, 0, sizeof(inode->u));
        inode->i_sock = 0;
        inode->i_op = NULL;
        inode->i_nlink = 1;
        inode->i_writecount = 0;
        inode->i_size = 0;
        inode->i_generation = 0;
        memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
        sema_init(&inode->i_sem, 1);
}

/*
 * This is called by things like the networking layer
 * etc that want to get an inode without any inode
 * number, or filesystems that allocate new inodes with
 * no pre-existing information.
 */
struct inode * get_empty_inode(void)
{
        static unsigned long last_ino = 0;
        struct inode * inode;
        struct list_head * tmp;

        spin_lock(&inode_lock);
        tmp = inode_unused.next;
        if (tmp != &inode_unused) {
                list_del(tmp);
                inodes_stat.nr_free_inodes--;
                inode = list_entry(tmp, struct inode, i_list);
add_new_inode:
                list_add(&inode->i_list, &inode_in_use);
                inode->i_sb = NULL;
                inode->i_dev = 0;
                inode->i_ino = ++last_ino;
                inode->i_flags = 0;
                inode->i_count = 1;
                inode->i_state = 0;
                spin_unlock(&inode_lock);
                clean_inode(inode);
                return inode;
        }

        /*
         * Warning: if this succeeded, we will now
         * return with the inode lock.
         */
        inode = grow_inodes();
        if (inode)
                goto add_new_inode;

        return inode;
}

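/*
 * (The fake i_ino comes from the static last_ino counter, and
 * the inode is never hashed, so it cannot collide with a real
 * on-disk inode during lookups.)
 */
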
/*
 * This is called with the inode lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *      -- rmk@arm.uk.linux.org
 */
static struct inode * get_new_inode(struct super_block *sb, unsigned long ino, struct list_head *head)
{
        struct inode * inode;
        struct list_head * tmp = inode_unused.next;

        if (tmp != &inode_unused) {
                list_del(tmp);
                inodes_stat.nr_free_inodes--;
                inode = list_entry(tmp, struct inode, i_list);
add_new_inode:
                list_add(&inode->i_list, &inode_in_use);
                list_add(&inode->i_hash, head);
                inode->i_sb = sb;
                inode->i_dev = sb->s_dev;
                inode->i_ino = ino;
                inode->i_flags = 0;
                inode->i_count = 1;
                inode->i_state = I_LOCK;
                spin_unlock(&inode_lock);

                clean_inode(inode);
                sb->s_op->read_inode(inode);

                /*
                 * This is special! We do not need the spinlock
                 * when clearing I_LOCK, because we're guaranteed
                 * that nobody else tries to do anything about the
                 * state of the inode when it is locked, as we
                 * just created it (so there can be no old holders
                 * that haven't tested I_LOCK).
                 */
                inode->i_state &= ~I_LOCK;
                wake_up(&inode->i_wait);

                return inode;
        }

        /*
         * We need to expand. Note that "grow_inodes()" will
         * release the spinlock, but will return with the lock
         * held again if the allocation succeeded.
         */
        inode = grow_inodes();
        if (inode) {
                /* We released the lock, so.. */
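                /*
                 * ..another CPU may have set up this same (sb, ino)
                 * while we slept in the allocator.  If so, give our
                 * fresh inode back to the unused list and wait for
                 * the one that won the race instead.
                 */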
                struct inode * old = find_inode(sb, ino, head);
                if (!old)
                        goto add_new_inode;
                list_add(&inode->i_list, &inode_unused);
                inodes_stat.nr_free_inodes++;
                spin_unlock(&inode_lock);
                wait_on_inode(old);
                return old;
        }
        return inode;
}

static inline unsigned long hash(struct super_block *sb, unsigned long i_ino)
{
        unsigned long tmp = i_ino | (unsigned long) sb;
        tmp = tmp + (tmp >> HASH_BITS) + (tmp >> HASH_BITS*2);
        return tmp & HASH_MASK;
}

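/*
 * (With HASH_BITS == 8 this is a 256-bucket table; the two
 * shifted terms fold the upper bits of the superblock pointer
 * and inode number down into the low byte before the mask, so
 * inodes of one superblock spread across buckets.)
 */
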
/* Yeah, I know about quadratic hash. Maybe, later. */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
        static ino_t counter = 0;
        struct inode *inode;
        struct list_head * head;
        ino_t res;
        spin_lock(&inode_lock);
retry:
        if (counter > max_reserved) {
                head = inode_hashtable + hash(sb,counter);
                inode = find_inode(sb, res = counter++, head);
                if (!inode) {
                        spin_unlock(&inode_lock);
                        return res;
                }
                inode->i_count--; /* compensate find_inode() */
        } else {
                counter = max_reserved + 1;
        }
        goto retry;
}

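/*
 * (For filesystems without natural inode numbers: the caller
 * keeps [0, max_reserved] for itself and this returns the next
 * free number above that range - linear probing, as the comment
 * above concedes.)
 */
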
struct inode *igrab(struct inode *inode)
{
        spin_lock(&inode_lock);
        if (inode->i_state & I_FREEING)
                inode = NULL;
        else
                inode->i_count++;
        spin_unlock(&inode_lock);
        if (inode)
                wait_on_inode(inode);
        return inode;
}

struct inode *iget(struct super_block *sb, unsigned long ino)
{
        struct list_head * head = inode_hashtable + hash(sb,ino);
        struct inode * inode;

        spin_lock(&inode_lock);
        inode = find_inode(sb, ino, head);
        if (inode) {
                spin_unlock(&inode_lock);
                wait_on_inode(inode);
                return inode;
        }
        /*
         * get_new_inode() will do the right thing, releasing
         * the inode lock and re-trying the search in case it
         * had to block at any point.
         */
        return get_new_inode(sb, ino, head);
}

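/*
 * (Illustrative caller, not from this file: a lookup routine
 * that has pulled an inode number out of a directory entry
 * typically does
 *
 *      inode = iget(dir->i_sb, ino);
 *      if (!inode)
 *              return ERR_PTR(-EACCES);
 *
 * much as the ext2 lookup code of this era does.)
 */
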
void insert_inode_hash(struct inode *inode)
{
        struct list_head *head = inode_hashtable + hash(inode->i_sb, inode->i_ino);
        spin_lock(&inode_lock);
        list_add(&inode->i_hash, head);
        spin_unlock(&inode_lock);
}

void remove_inode_hash(struct inode *inode)
{
        spin_lock(&inode_lock);
        list_del(&inode->i_hash);
        INIT_LIST_HEAD(&inode->i_hash);
        spin_unlock(&inode_lock);
}

void iput(struct inode *inode)
{
        if (inode) {
                struct super_operations *op = NULL;

                if (inode->i_sb && inode->i_sb->s_op)
                        op = inode->i_sb->s_op;
                if (op && op->put_inode)
                        op->put_inode(inode);

                spin_lock(&inode_lock);
                if (!--inode->i_count) {
                        if (!inode->i_nlink) {
                                list_del(&inode->i_hash);
                                INIT_LIST_HEAD(&inode->i_hash);
                                list_del(&inode->i_list);
                                INIT_LIST_HEAD(&inode->i_list);
                                inode->i_state|=I_FREEING;
                                if (op && op->delete_inode) {
                                        void (*delete)(struct inode *) = op->delete_inode;
                                        spin_unlock(&inode_lock);
                                        delete(inode);
                                        spin_lock(&inode_lock);
                                }
                        }
                        if (list_empty(&inode->i_hash)) {
                                list_del(&inode->i_list);
                                INIT_LIST_HEAD(&inode->i_list);
                                inode->i_state|=I_FREEING;
                                spin_unlock(&inode_lock);
                                clear_inode(inode);
                                spin_lock(&inode_lock);
                                list_add(&inode->i_list, &inode_unused);
                                inodes_stat.nr_free_inodes++;
                        }
                        else if (!(inode->i_state & I_DIRTY)) {
                                list_del(&inode->i_list);
                                list_add(&inode->i_list, &inode_in_use);
                        }
#ifdef INODE_PARANOIA
                        if (inode->i_flock)
                                printk(KERN_ERR "iput: inode %s/%ld still has locks!\n",
                                        kdevname(inode->i_dev), inode->i_ino);
                        if (!list_empty(&inode->i_dentry))
                                printk(KERN_ERR "iput: device %s inode %ld still has aliases!\n",
                                        kdevname(inode->i_dev), inode->i_ino);
                        if (inode->i_count)
                                printk(KERN_ERR "iput: device %s inode %ld count changed, count=%d\n",
                                        kdevname(inode->i_dev), inode->i_ino, inode->i_count);
                        if (atomic_read(&inode->i_sem.count) != 1)
                                printk(KERN_ERR "iput: Aieee, semaphore in use inode %s/%ld, count=%d\n",
                                        kdevname(inode->i_dev), inode->i_ino, atomic_read(&inode->i_sem.count));
                        if (atomic_read(&inode->i_atomic_write.count) != 1)
                                printk(KERN_ERR "iput: Aieee, atomic write semaphore in use inode %s/%ld, count=%d\n",
                                        kdevname(inode->i_dev), inode->i_ino,
                                        atomic_read(&inode->i_atomic_write.count));
#endif
                }
                if (inode->i_count > (1<<31)) {
                        printk(KERN_ERR "iput: inode %s/%ld count wrapped\n",
                                kdevname(inode->i_dev), inode->i_ino);
                }
                spin_unlock(&inode_lock);
        }
}

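/*
 * (Every successful iget()/get_empty_inode() must be paired with
 * exactly one iput(); the (1<<31) check above is a sanity net
 * that catches a count wrapped past 2^31 by an unbalanced
 * iput().)
 */
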
int bmap(struct inode * inode, int block)
{
        if (inode->i_op && inode->i_op->bmap)
                return inode->i_op->bmap(inode, block);
        return 0;
}

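/*
 * (Maps a logical file block to a device block, 0 meaning "no
 * mapping"; the swap-file setup code is a typical user, since
 * it needs raw device blocks.)
 */
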
/*
 * Initialize the hash tables and default
 * value for max inodes
 */
#define MAX_INODE (16384)

void __init inode_init(void)
{
        int i, max;
        struct list_head *head = inode_hashtable;

        i = HASH_SIZE;
        do {
                INIT_LIST_HEAD(head);
                head++;
                i--;
        } while (i);

        /* Initial guess at reasonable inode number */
        max = num_physpages >> 1;
        if (max > MAX_INODE)
                max = MAX_INODE;
        max_inodes = max;
}

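/*
 * (Example, assuming 4 KiB pages: a 64 MiB machine has 16384
 * physical pages, so max_inodes starts at 8192; the MAX_INODE
 * cap of 16384 only bites at 128 MiB and up.)
 */
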
/* This belongs in file_table.c, not here... */
int fs_may_remount_ro(struct super_block *sb)
{
        struct file *file;

        /* Check that no files are currently opened for writing. */
        for (file = inuse_filps; file; file = file->f_next) {
                struct inode *inode;
                if (!file->f_dentry)
                        continue;
                inode = file->f_dentry->d_inode;
                if (!inode || inode->i_sb != sb)
                        continue;

                /* File with pending delete? */
                if (inode->i_nlink == 0)
                        return 0;

                /* Writable file? */
                if (S_ISREG(inode->i_mode) && (file->f_mode & FMODE_WRITE))
                        return 0;
        }
        return 1; /* Tis' cool bro. */
}

void update_atime (struct inode *inode)
{
        if ( IS_NOATIME (inode) ) return;
        if ( IS_NODIRATIME (inode) && S_ISDIR (inode->i_mode) ) return;
        if ( IS_RDONLY (inode) ) return;
        inode->i_atime = CURRENT_TIME;
        mark_inode_dirty (inode);
}   /*  End Function update_atime  */