/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 */

#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 */
#define INODE_PARANOIA 1
/* #define INODE_DEBUG 1 */
/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define HASH_BITS	8	/* assumed value: the original define was lost in this copy */
#define HASH_SIZE	(1UL << HASH_BITS)
#define HASH_MASK	(HASH_SIZE-1)
/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, hashed if i_nlink > 0
 *  "dirty"  - valid inode, hashed if i_nlink > 0, dirty.
 *  "unused" - ready to be re-used. Not hashed.
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */
static LIST_HEAD(inode_in_use);
static LIST_HEAD(inode_unused);
static struct list_head inode_hashtable[HASH_SIZE];
/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;
/*
 * Statistics gathering..
 */
struct {
	int nr_inodes;
	int nr_free_inodes;
} inodes_stat = {0, 0,};

int max_inodes;	/* set in inode_init(), checked in grow_inodes() */
/*
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but
 * move it onto the dirty list only if it is hashed.
 * If it was not hashed, it will never be added to
 * the dirty list even if it is later hashed, as it
 * will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_
 * you start marking them dirty..
 */
void __mark_inode_dirty(struct inode *inode)
{
	struct super_block * sb = inode->i_sb;

	if (sb) {
		spin_lock(&inode_lock);
		if (!(inode->i_state & I_DIRTY)) {
			inode->i_state |= I_DIRTY;
			/* Only add valid (ie hashed) inodes to the dirty list */
			if (!list_empty(&inode->i_hash)) {
				list_del(&inode->i_list);
				list_add(&inode->i_list, &sb->s_dirty);
			}
		}
		spin_unlock(&inode_lock);
	}
}
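/*
 * Usage sketch (illustration, not original code): per the warning
 * above, a filesystem should hash a new inode before dirtying it,
 * e.g.
 *
 *	insert_inode_hash(inode);
 *	mark_inode_dirty(inode);	(the fs.h wrapper for __mark_inode_dirty)
 *
 * so the inode actually reaches its super block's s_dirty list.
 */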
static void __wait_on_inode(struct inode * inode)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&inode->i_wait, &wait);
repeat:
	current->state = TASK_UNINTERRUPTIBLE;
	if (inode->i_state & I_LOCK) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&inode->i_wait, &wait);
	current->state = TASK_RUNNING;
}
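/*
 * Added commentary: the loop above is the classic open-coded sleep.
 * The task state is set to TASK_UNINTERRUPTIBLE *before* I_LOCK is
 * tested, so a wake_up() that slips in between the test and the
 * schedule() just puts the task back to TASK_RUNNING and schedule()
 * returns promptly - no wakeup can be lost.
 */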
static inline void wait_on_inode(struct inode *inode)
{
	if (inode->i_state & I_LOCK)
		__wait_on_inode(inode);
}
/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode..
 */
static inline void init_once(struct inode * inode)
{
	memset(inode, 0, sizeof(*inode));
	init_waitqueue_head(&inode->i_wait);
	INIT_LIST_HEAD(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	sema_init(&inode->i_sem, 1);
	spin_lock_init(&inode->i_shared_lock);
}
static inline void write_inode(struct inode *inode)
{
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->write_inode)
		inode->i_sb->s_op->write_inode(inode);
}
static inline void sync_one(struct inode *inode)
{
	if (inode->i_state & I_LOCK) {
		spin_unlock(&inode_lock);
		__wait_on_inode(inode);
		spin_lock(&inode_lock);
	} else {
		list_del(&inode->i_list);
		list_add(&inode->i_list, &inode_in_use);
		/* Set I_LOCK, reset I_DIRTY */
		inode->i_state ^= I_DIRTY | I_LOCK;
		spin_unlock(&inode_lock);

		write_inode(inode);

		spin_lock(&inode_lock);
		inode->i_state &= ~I_LOCK;
		wake_up(&inode->i_wait);
	}
}
static inline void sync_list(struct list_head *head)
{
	struct list_head * tmp;

	while ((tmp = head->prev) != head)
		sync_one(list_entry(tmp, struct inode, i_list));
}
172 * "sync_inodes()" goes through the super block's dirty list,
173 * writes them out, and puts them back on the normal list.
175 void sync_inodes(kdev_t dev
)
177 struct super_block
* sb
= sb_entry(super_blocks
.next
);
180 * Search the super_blocks array for the device(s) to sync.
182 spin_lock(&inode_lock
);
183 for (; sb
!= sb_entry(&super_blocks
); sb
= sb_entry(sb
->s_list
.next
)) {
186 if (dev
&& sb
->s_dev
!= dev
)
189 sync_list(&sb
->s_dirty
);
194 spin_unlock(&inode_lock
);
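/*
 * Usage sketch (illustration, not original code): a dev of 0 matches
 * every super block, so
 *
 *	sync_inodes(0);			(flush dirty inodes everywhere)
 *	sync_inodes(inode->i_dev);	(flush one device, then stop)
 *
 * The "if (dev) break;" above is what cuts the scan short in the
 * single-device case.
 */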
/*
 * Called with the spinlock already held..
 */
static void sync_all_inodes(void)
{
	struct super_block * sb = sb_entry(super_blocks.next);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
		if (!sb->s_dev)
			continue;
		sync_list(&sb->s_dirty);
	}
}
void write_inode_now(struct inode *inode)
{
	struct super_block * sb = inode->i_sb;

	if (sb) {
		spin_lock(&inode_lock);
		while (inode->i_state & I_DIRTY)
			sync_one(inode);
		spin_unlock(&inode_lock);
	}
	else
		printk("write_inode_now: no super block\n");
}
/*
 * This is called by the filesystem to tell us
 * that the inode is no longer useful. We just
 * terminate it with extreme prejudice.
 */
void clear_inode(struct inode *inode)
{
	if (inode->i_nrpages)
		truncate_inode_pages(inode, 0);
	wait_on_inode(inode);
	if (IS_QUOTAINIT(inode))
		DQUOT_DROP(inode);
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->clear_inode)
		inode->i_sb->s_op->clear_inode(inode);
}
/*
 * Dispose-list gets a local list, so it doesn't need to
 * worry about list corruption. It releases the inode lock
 * while clearing the inodes.
 */
static void dispose_list(struct list_head * head)
{
	struct list_head *next;
	int count = 0;

	spin_unlock(&inode_lock);
	next = head->next;
	for (;;) {
		struct list_head * tmp = next;
		struct inode * inode;

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_list);
		if (inode->i_nrpages)
			truncate_inode_pages(inode, 0);
		clear_inode(inode);
		count++;
	}

	/* Add them all to the unused list in one fell swoop */
	spin_lock(&inode_lock);
	list_splice(head, &inode_unused);
	inodes_stat.nr_free_inodes += count;
}
/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose)
{
	struct list_head *next;
	int busy = 0;

	next = head->next;
	for (;;) {
		struct list_head * tmp = next;
		struct inode * inode;

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_list);
		if (inode->i_sb != sb)
			continue;
		if (!inode->i_count) {
			list_del(&inode->i_hash);
			INIT_LIST_HEAD(&inode->i_hash);
			list_del(&inode->i_list);
			list_add(&inode->i_list, dispose);
			inode->i_state |= I_FREEING;
			continue;
		}
		busy = 1;
	}
	return busy;
}
/*
 * This is a two-stage process. First we collect all
 * offending inodes onto the throw-away list, and in
 * the second stage we actually dispose of them. This
 * is because we don't want to sleep while messing
 * with the global lists..
 */
int invalidate_inodes(struct super_block * sb)
{
	int busy;
	LIST_HEAD(throw_away);

	spin_lock(&inode_lock);
	busy = invalidate_list(&inode_in_use, sb, &throw_away);
	busy |= invalidate_list(&sb->s_dirty, sb, &throw_away);
	dispose_list(&throw_away);
	spin_unlock(&inode_lock);

	return busy;
}
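/*
 * Usage sketch (illustration, not original code): the unmount path is
 * the typical caller. A nonzero return means some inode belonging to
 * this super block is still referenced:
 *
 *	if (invalidate_inodes(sb))
 *		return -EBUSY;	(hypothetical caller reaction)
 */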
/*
 * This is called with the inode lock held. It searches
 * the in-use list for freeable inodes, which are moved to a
 * temporary list and then placed on the unused list by
 * dispose_list.
 *
 * We don't expect to have to call this very often.
 *
 * N.B. The spinlock is released during the call to
 *      dispose_list.
 */
#define CAN_UNUSE(inode) \
	(((inode)->i_count | (inode)->i_state | (inode)->i_nrpages) == 0)
#define INODE(entry)	(list_entry(entry, struct inode, i_list))
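/*
 * Added commentary: CAN_UNUSE OR-combines three fields and compares
 * once against zero - it is true only when the inode has no users
 * (i_count == 0), no state bits set (not locked, dirty, or being
 * freed) and no pages in the page cache.
 */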
static int free_inodes(void)
{
	struct list_head list, *entry, *freeable = &list;
	int found = 0;

	INIT_LIST_HEAD(freeable);
	entry = inode_in_use.next;
	while (entry != &inode_in_use) {
		struct list_head *tmp = entry;

		entry = entry->next;
		if (!CAN_UNUSE(INODE(tmp)))
			continue;
		list_del(tmp);
		list_del(&INODE(tmp)->i_hash);
		INIT_LIST_HEAD(&INODE(tmp)->i_hash);
		list_add(tmp, freeable);
		list_entry(tmp, struct inode, i_list)->i_state = I_FREEING;
		found = 1;
	}

	if (found)
		dispose_list(freeable);

	return found;
}
/*
 * Searches the inodes list for freeable inodes,
 * shrinking the dcache before (and possibly after,
 * if we're low).
 */
static void try_to_free_inodes(int goal)
{
	/*
	 * First try to just get rid of unused inodes.
	 *
	 * If we can't reach our goal that way, we'll have
	 * to try to shrink the dcache and sync existing
	 * inodes..
	 */
	free_inodes();
	goal -= inodes_stat.nr_free_inodes;
	if (goal > 0) {
		spin_unlock(&inode_lock);
		select_dcache(goal, 0);
		prune_dcache(goal);
		spin_lock(&inode_lock);
		sync_all_inodes();
		free_inodes();
	}
}
/*
 * This is the externally visible routine for
 * inode memory management.
 */
void free_inode_memory(int goal)
{
	spin_lock(&inode_lock);
	free_inodes();
	spin_unlock(&inode_lock);
}
/*
 * This is called with the spinlock held, but releases
 * the lock when freeing or allocating inodes.
 * Look out! This returns with the inode lock held if
 * it got an inode..
 *
 * We do inode allocations two pages at a time to reduce
 * fragmentation.
 */
#define INODE_PAGE_ORDER	1
#define INODE_ALLOCATION_SIZE	(PAGE_SIZE << INODE_PAGE_ORDER)
#define INODES_PER_ALLOCATION	(INODE_ALLOCATION_SIZE/sizeof(struct inode))
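/*
 * Added commentary: with INODE_PAGE_ORDER 1 each allocation grabs
 * PAGE_SIZE << 1 bytes, e.g. 8192 on a 4K-page machine. If
 * sizeof(struct inode) were, say, 400 bytes (an illustrative figure
 * only), INODES_PER_ALLOCATION would come to 8192/400 = 20 inodes
 * per batch.
 */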
static struct inode * grow_inodes(void)
{
	struct inode * inode;

	/*
	 * Check whether to restock the unused list.
	 */
	if (inodes_stat.nr_inodes > max_inodes) {
		struct list_head *tmp;
		try_to_free_inodes(inodes_stat.nr_inodes >> 2);
		tmp = inode_unused.next;
		if (tmp != &inode_unused) {
			inodes_stat.nr_free_inodes--;
			list_del(tmp);
			inode = list_entry(tmp, struct inode, i_list);
			return inode;
		}
	}

	spin_unlock(&inode_lock);
	inode = (struct inode *)__get_free_pages(GFP_KERNEL,INODE_PAGE_ORDER);
	if (inode) {
		int size;
		struct inode * tmp;

		size = INODE_ALLOCATION_SIZE - 2*sizeof(struct inode);
		tmp = inode;
		spin_lock(&inode_lock);
		do {
			tmp++;
			init_once(tmp);
			list_add(&tmp->i_list, &inode_unused);
			size -= sizeof(struct inode);
		} while (size >= 0);

		/*
		 * Update the inode statistics
		 */
		inodes_stat.nr_inodes += INODES_PER_ALLOCATION;
		inodes_stat.nr_free_inodes += INODES_PER_ALLOCATION - 1;
		init_once(inode);
		return inode;
	}

	/*
	 * If the allocation failed, do an extensive pruning of
	 * the dcache and then try again to free some inodes.
	 */
	prune_dcache(inodes_stat.nr_inodes >> 2);

	spin_lock(&inode_lock);
	free_inodes();
	{
		struct list_head *tmp = inode_unused.next;
		if (tmp != &inode_unused) {
			inodes_stat.nr_free_inodes--;
			list_del(tmp);
			inode = list_entry(tmp, struct inode, i_list);
			return inode;
		}
	}
	spin_unlock(&inode_lock);

	printk("grow_inodes: allocation failed\n");
	return NULL;
}
/*
 * Called with the inode lock held.
 */
static struct inode * find_inode(struct super_block * sb, unsigned long ino, struct list_head *head)
{
	struct list_head *tmp;
	struct inode * inode;

	tmp = head;
	for (;;) {
		tmp = tmp->next;
		inode = NULL;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_hash);
		if (inode->i_sb != sb)
			continue;
		if (inode->i_ino != ino)
			continue;
		inode->i_count++;
		break;
	}
	return inode;
}
/*
 * This just initializes the inode fields
 * to known values before returning the inode..
 *
 * i_sb, i_ino, i_count, i_state and the lists have
 * been initialized elsewhere..
 */
void clean_inode(struct inode *inode)
{
	memset(&inode->u, 0, sizeof(inode->u));
	inode->i_sock = 0;
	inode->i_op = NULL;
	inode->i_nlink = 1;
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_generation = 0;
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
	sema_init(&inode->i_sem, 1);
	inode->i_pipe = NULL;
}
/*
 * This is called by things like the networking layer
 * etc that want to get an inode without any inode
 * number, or filesystems that allocate new inodes with
 * no pre-existing information.
 */
struct inode * get_empty_inode(void)
{
	static unsigned long last_ino = 0;
	struct inode * inode;
	struct list_head * tmp;

	spin_lock(&inode_lock);
	tmp = inode_unused.next;
	if (tmp != &inode_unused) {
		list_del(tmp);
		inodes_stat.nr_free_inodes--;
		inode = list_entry(tmp, struct inode, i_list);
add_new_inode:
		list_add(&inode->i_list, &inode_in_use);
		inode->i_sb = NULL;
		inode->i_dev = 0;
		inode->i_ino = ++last_ino;
		inode->i_flags = 0;
		inode->i_count = 1;
		inode->i_state = 0;
		spin_unlock(&inode_lock);
		clean_inode(inode);
		return inode;
	}

	/*
	 * Warning: if this succeeded, we will now
	 * return with the inode lock.
	 */
	inode = grow_inodes();
	if (inode)
		goto add_new_inode;

	return inode;
}
/*
 * This is called with the inode lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *	-- rmk@arm.uk.linux.org
 */
static struct inode * get_new_inode(struct super_block *sb, unsigned long ino, struct list_head *head)
{
	struct inode * inode;
	struct list_head * tmp = inode_unused.next;

	if (tmp != &inode_unused) {
		list_del(tmp);
		inodes_stat.nr_free_inodes--;
		inode = list_entry(tmp, struct inode, i_list);
add_new_inode:
		list_add(&inode->i_list, &inode_in_use);
		list_add(&inode->i_hash, head);
		inode->i_sb = sb;
		inode->i_dev = sb->s_dev;
		inode->i_ino = ino;
		inode->i_flags = 0;
		inode->i_count = 1;
		inode->i_state = I_LOCK;
		spin_unlock(&inode_lock);

		clean_inode(inode);
		sb->s_op->read_inode(inode);

		/*
		 * This is special! We do not need the spinlock
		 * when clearing I_LOCK, because we're guaranteed
		 * that nobody else tries to do anything about the
		 * state of the inode when it is locked, as we
		 * just created it (so there can be no old holders
		 * that haven't tested I_LOCK).
		 */
		inode->i_state &= ~I_LOCK;
		wake_up(&inode->i_wait);

		return inode;
	}

	/*
	 * We need to expand. Note that "grow_inodes()" will
	 * release the spinlock, but will return with the lock
	 * held again if the allocation succeeded.
	 */
	inode = grow_inodes();
	if (inode) {
		/* We released the lock, so.. */
		struct inode * old = find_inode(sb, ino, head);
		if (!old)
			goto add_new_inode;
		list_add(&inode->i_list, &inode_unused);
		inodes_stat.nr_free_inodes++;
		spin_unlock(&inode_lock);
		wait_on_inode(old);
		return old;
	}
	return inode;
}
static inline unsigned long hash(struct super_block *sb, unsigned long i_ino)
{
	unsigned long tmp = i_ino | (unsigned long) sb;
	tmp = tmp + (tmp >> HASH_BITS) + (tmp >> HASH_BITS*2);
	return tmp & HASH_MASK;
}
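/*
 * Added commentary: the hash mixes the super block pointer into the
 * inode number, then folds the upper bits down twice so that bits
 * above HASH_BITS still influence the bucket index, which HASH_MASK
 * finally truncates to 0..HASH_SIZE-1.
 */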
/* Yeah, I know about quadratic hash. Maybe, later. */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	static ino_t counter = 0;
	struct inode *inode;
	struct list_head * head;
	ino_t res;

	spin_lock(&inode_lock);
retry:
	if (counter > max_reserved) {
		head = inode_hashtable + hash(sb,counter);
		inode = find_inode(sb, res = counter++, head);
		if (!inode) {
			spin_unlock(&inode_lock);
			return res;
		}
		inode->i_count--; /* compensate find_inode() */
	} else {
		counter = max_reserved + 1;
	}
	goto retry;
}
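/*
 * Usage sketch (illustration, not original code): filesystems without
 * stable on-disk inode numbers can reserve a low range and let
 * iunique() hand out unused numbers above it, e.g.
 *
 *	inode->i_ino = iunique(sb, 100);	(numbers from 101 up are dynamic)
 */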
struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode_lock);
	if (inode->i_state & I_FREEING)
		inode = NULL;
	else
		inode->i_count++;
	spin_unlock(&inode_lock);
	if (inode)
		wait_on_inode(inode);
	return inode;
}
struct inode *iget(struct super_block *sb, unsigned long ino)
{
	struct list_head * head = inode_hashtable + hash(sb,ino);
	struct inode * inode;

	spin_lock(&inode_lock);
	inode = find_inode(sb, ino, head);
	if (inode) {
		spin_unlock(&inode_lock);
		wait_on_inode(inode);
		return inode;
	}
	/*
	 * get_new_inode() will do the right thing, releasing
	 * the inode lock and re-trying the search in case it
	 * had to block at any point.
	 */
	return get_new_inode(sb, ino, head);
}
void insert_inode_hash(struct inode *inode)
{
	struct list_head *head = inode_hashtable + hash(inode->i_sb, inode->i_ino);
	spin_lock(&inode_lock);
	list_add(&inode->i_hash, head);
	spin_unlock(&inode_lock);
}

void remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_lock);
	list_del(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_hash);
	spin_unlock(&inode_lock);
}
void iput(struct inode *inode)
{
	if (inode) {
		struct super_operations *op = NULL;

		if (inode->i_sb && inode->i_sb->s_op)
			op = inode->i_sb->s_op;
		if (op && op->put_inode)
			op->put_inode(inode);

		spin_lock(&inode_lock);
		if (!--inode->i_count) {
			if (!inode->i_nlink) {
				list_del(&inode->i_hash);
				INIT_LIST_HEAD(&inode->i_hash);
				list_del(&inode->i_list);
				INIT_LIST_HEAD(&inode->i_list);
				inode->i_state |= I_FREEING;
				if (op && op->delete_inode) {
					void (*delete)(struct inode *) = op->delete_inode;
					spin_unlock(&inode_lock);
					if (inode->i_nrpages)
						truncate_inode_pages(inode, 0);
					delete(inode);
					spin_lock(&inode_lock);
				}
			}
			if (list_empty(&inode->i_hash)) {
				list_del(&inode->i_list);
				INIT_LIST_HEAD(&inode->i_list);
				inode->i_state |= I_FREEING;
				spin_unlock(&inode_lock);
				clear_inode(inode);
				spin_lock(&inode_lock);
				list_add(&inode->i_list, &inode_unused);
				inodes_stat.nr_free_inodes++;
			}
			else if (!(inode->i_state & I_DIRTY)) {
				list_del(&inode->i_list);
				list_add(&inode->i_list, &inode_in_use);
			}
#ifdef INODE_PARANOIA
			if (inode->i_flock)
				printk(KERN_ERR "iput: inode %s/%ld still has locks!\n",
					kdevname(inode->i_dev), inode->i_ino);
			if (!list_empty(&inode->i_dentry))
				printk(KERN_ERR "iput: device %s inode %ld still has aliases!\n",
					kdevname(inode->i_dev), inode->i_ino);
			if (inode->i_count)
				printk(KERN_ERR "iput: device %s inode %ld count changed, count=%d\n",
					kdevname(inode->i_dev), inode->i_ino, inode->i_count);
			if (atomic_read(&inode->i_sem.count) != 1)
				printk(KERN_ERR "iput: Aieee, semaphore in use inode %s/%ld, count=%d\n",
					kdevname(inode->i_dev), inode->i_ino, atomic_read(&inode->i_sem.count));
#endif
		}
		if (inode->i_count > (1<<31)) {
			printk(KERN_ERR "iput: inode %s/%ld count wrapped\n",
				kdevname(inode->i_dev), inode->i_ino);
		}
		spin_unlock(&inode_lock);
	}
}
int bmap(struct inode * inode, int block)
{
	struct buffer_head tmp;

	if (inode->i_op && inode->i_op->get_block) {
		tmp.b_state = 0;
		tmp.b_blocknr = 0;
		inode->i_op->get_block(inode, block, &tmp, 0);
		return tmp.b_blocknr;
	}
	return 0;
}
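/*
 * Added commentary: bmap() maps a logical block in the file to its
 * physical block number on the device via a non-creating (create == 0)
 * get_block lookup; a return of 0 means a hole or an unmapped block.
 */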
/*
 * Initialize the hash tables and default
 * value for max inodes
 */
#define MAX_INODE (16384)

void __init inode_init(void)
{
	int i, max;
	struct list_head *head = inode_hashtable;

	i = HASH_SIZE;
	do {
		INIT_LIST_HEAD(head);
		head++;
		i--;
	} while (i);

	/* Initial guess at reasonable inode number */
	max = num_physpages >> 1;
	if (max > MAX_INODE)
		max = MAX_INODE;
	max_inodes = max;
}
void update_atime (struct inode *inode)
{
	if ( IS_NOATIME (inode) ) return;
	if ( IS_NODIRATIME (inode) && S_ISDIR (inode->i_mode) ) return;
	if ( IS_RDONLY (inode) ) return;
	inode->i_atime = CURRENT_TIME;
	mark_inode_dirty (inode);
}   /*  End Function update_atime  */