/*
 * linux/fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/cache.h>
/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 */

/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */

#define INODE_PARANOIA 1
/* #define INODE_DEBUG 1 */
/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define I_HASHBITS	i_hash_shift
#define I_HASHMASK	i_hash_mask

static unsigned int i_hash_mask;
static unsigned int i_hash_shift;
/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, i_count > 0, i_nlink > 0
 *  "dirty"  - as "in_use" but also dirty
 *  "unused" - valid inode, i_count = 0
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */

static LIST_HEAD(inode_in_use);
static LIST_HEAD(inode_unused);
static struct list_head *inode_hashtable;
static LIST_HEAD(anon_hash_chain); /* for inodes with NULL i_sb */
/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
spinlock_t inode_lock = SPIN_LOCK_UNLOCKED;
/*
 * Statistics gathering..
 */
struct {
	int nr_inodes;
	int nr_unused;
} inodes_stat = {0, 0,};
static kmem_cache_t * inode_cachep;

#define alloc_inode() \
	 ((struct inode *) kmem_cache_alloc(inode_cachep, SLAB_KERNEL))
#define destroy_inode(inode) kmem_cache_free(inode_cachep, (inode))
/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache be aware of that.
 */
static void init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
{
	struct inode * inode = (struct inode *) foo;

	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR)
	{
		memset(inode, 0, sizeof(*inode));
		init_waitqueue_head(&inode->i_wait);
		INIT_LIST_HEAD(&inode->i_hash);
		INIT_LIST_HEAD(&inode->i_data.pages);
		INIT_LIST_HEAD(&inode->i_dentry);
		sema_init(&inode->i_sem, 1);
		sema_init(&inode->i_zombie, 1);
		spin_lock_init(&inode->i_data.i_shared_lock);
	}
}
/*
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but
 * move it onto the dirty list only if it is hashed.
 * If it was not hashed, it will never be added to
 * the dirty list even if it is later hashed, as it
 * will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_
 * you start marking them dirty..
 */

/**
 *	__mark_inode_dirty -	internal function
 *	@inode: inode to mark
 *
 *	Mark an inode as dirty. Callers should use mark_inode_dirty.
 */
void __mark_inode_dirty(struct inode *inode)
{
	struct super_block * sb = inode->i_sb;

	if (sb) {
		spin_lock(&inode_lock);
		if (!(inode->i_state & I_DIRTY)) {
			inode->i_state |= I_DIRTY;
			/* Only add valid (ie hashed) inodes to the dirty list */
			if (!list_empty(&inode->i_hash)) {
				list_del(&inode->i_list);
				list_add(&inode->i_list, &sb->s_dirty);
			}
		}
		spin_unlock(&inode_lock);
	}
}
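
/*
 * Illustrative example (not part of the original source): per the note
 * above, code creating a brand new inode should hash it before dirtying
 * it, roughly:
 *
 *	insert_inode_hash(inode);
 *	mark_inode_dirty(inode);
 *
 * With the calls reversed the inode is marked I_DIRTY but never makes it
 * onto the per-superblock dirty list.
 */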
static void __wait_on_inode(struct inode * inode)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&inode->i_wait, &wait);
repeat:
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (inode->i_state & I_LOCK) {
		schedule();
		goto repeat;
	}
	remove_wait_queue(&inode->i_wait, &wait);
	current->state = TASK_RUNNING;
}
static inline void wait_on_inode(struct inode *inode)
{
	if (inode->i_state & I_LOCK)
		__wait_on_inode(inode);
}
static inline void write_inode(struct inode *inode)
{
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->write_inode)
		inode->i_sb->s_op->write_inode(inode);
}
static inline void __iget(struct inode * inode)
{
	if (!inode->i_count++)
	{
		if (!(inode->i_state & I_DIRTY))
		{
			list_del(&inode->i_list);
			list_add(&inode->i_list, &inode_in_use);
		}
		inodes_stat.nr_unused--;
	}
}
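
/*
 * Write a single dirty inode back through the superblock's write_inode
 * method and move it from the dirty list onto the in_use or unused list.
 * Expects inode_lock to be held on entry; the lock is dropped while
 * writing or waiting and re-taken before returning.
 */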
static inline void sync_one(struct inode *inode)
{
	if (inode->i_state & I_LOCK) {
		spin_unlock(&inode_lock);
		__wait_on_inode(inode);
		spin_lock(&inode_lock);
	} else {
		list_del(&inode->i_list);
		list_add(&inode->i_list,
			 inode->i_count ? &inode_in_use : &inode_unused);
		/* Set I_LOCK, reset I_DIRTY */
		inode->i_state ^= I_DIRTY | I_LOCK;
		spin_unlock(&inode_lock);

		write_inode(inode);

		spin_lock(&inode_lock);
		inode->i_state &= ~I_LOCK;
		wake_up(&inode->i_wait);
	}
}
static inline void sync_list(struct list_head *head)
{
	struct list_head * tmp;

	while ((tmp = head->prev) != head)
		sync_one(list_entry(tmp, struct inode, i_list));
}
/**
 *	sync_inodes - write dirty inodes for a device
 *	@dev: device to sync the inodes from.
 *
 *	sync_inodes goes through the super block's dirty list,
 *	writes them out, and puts them back on the normal list.
 */

void sync_inodes(kdev_t dev)
{
	struct super_block * sb = sb_entry(super_blocks.next);

	/*
	 * Search the super_blocks array for the device(s) to sync.
	 */
	spin_lock(&inode_lock);
	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
		if (dev && sb->s_dev != dev)
			continue;
		sync_list(&sb->s_dirty);
	}
	spin_unlock(&inode_lock);
}
/*
 * Called with the spinlock already held..
 */
static void sync_all_inodes(void)
{
	struct super_block * sb = sb_entry(super_blocks.next);

	for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.next)) {
		sync_list(&sb->s_dirty);
	}
}
/**
 *	write_inode_now - write an inode to disk
 *	@inode: inode to write to disk
 *
 *	This function commits an inode to disk immediately if it is
 *	dirty. This is primarily needed by knfsd.
 */

void write_inode_now(struct inode *inode)
{
	struct super_block * sb = inode->i_sb;

	if (sb) {
		spin_lock(&inode_lock);
		while (inode->i_state & I_DIRTY)
			sync_one(inode);
		spin_unlock(&inode_lock);
	} else
		printk("write_inode_now: no super block\n");
}
/**
 *	clear_inode - clear an inode
 *	@inode: inode to clear
 *
 *	This is called by the filesystem to tell us
 *	that the inode is no longer useful. We just
 *	terminate it with extreme prejudice.
 */

void clear_inode(struct inode *inode)
{
	if (inode->i_data.nrpages)
		BUG();
	if (!(inode->i_state & I_FREEING))
		BUG();
	if (inode->i_state & I_CLEAR)
		BUG();
	wait_on_inode(inode);
	if (IS_QUOTAINIT(inode))
		DQUOT_DROP(inode);
	if (inode->i_sb && inode->i_sb->s_op && inode->i_sb->s_op->clear_inode)
		inode->i_sb->s_op->clear_inode(inode);
	if (inode->i_bdev) {
		bdput(inode->i_bdev);
		inode->i_bdev = NULL;
	}
	inode->i_state = I_CLEAR;
}
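
/*
 * Illustrative example (not part of the original source; the myfs_* names
 * are hypothetical): a filesystem's s_op->delete_inode method typically
 * frees the on-disk inode and then calls clear_inode() itself:
 *
 *	static void myfs_delete_inode(struct inode *inode)
 *	{
 *		myfs_free_on_disk_inode(inode);	 <- hypothetical helper
 *		clear_inode(inode);
 *	}
 */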
/*
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head * head)
{
	struct list_head * inode_entry;
	struct inode * inode;

	while ((inode_entry = head->next) != head)
	{
		list_del(inode_entry);

		inode = list_entry(inode_entry, struct inode, i_list);
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
		destroy_inode(inode);
	}
}
/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct super_block * sb, struct list_head * dispose)
{
	struct list_head *next;
	int busy = 0, count = 0;

	next = head->next;
	for (;;) {
		struct list_head * tmp = next;
		struct inode * inode;

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_list);
		if (inode->i_sb != sb)
			continue;
		if (!inode->i_count) {
			list_del(&inode->i_hash);
			INIT_LIST_HEAD(&inode->i_hash);
			list_del(&inode->i_list);
			list_add(&inode->i_list, dispose);
			inode->i_state |= I_FREEING;
			count++;
			continue;
		}
		busy = 1;
	}
	/* only unused inodes may be cached with i_count zero */
	inodes_stat.nr_unused -= count;
	return busy;
}
/*
 * This is a two-stage process. First we collect all
 * offending inodes onto the throw-away list, and in
 * the second stage we actually dispose of them. This
 * is because we don't want to sleep while messing
 * with the global lists..
 */

/**
 *	invalidate_inodes	- discard the inodes on a device
 *	@sb: superblock
 *
 *	Discard all of the inodes for a given superblock. If the discard
 *	fails because there are busy inodes then a non-zero value is returned.
 *	If the discard is successful all the inodes are discarded.
 */

int invalidate_inodes(struct super_block * sb)
{
	int busy;
	LIST_HEAD(throw_away);

	spin_lock(&inode_lock);
	busy = invalidate_list(&inode_in_use, sb, &throw_away);
	busy |= invalidate_list(&inode_unused, sb, &throw_away);
	busy |= invalidate_list(&sb->s_dirty, sb, &throw_away);
	spin_unlock(&inode_lock);

	dispose_list(&throw_away);

	return busy;
}
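
/*
 * Illustrative example (not part of the original source): a caller tearing
 * down a superblock treats a non-zero return as "busy inodes remain", e.g.
 *
 *	if (invalidate_inodes(sb))
 *		printk(KERN_WARNING "VFS: some inodes are still busy\n");
 */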
/*
 * This is called with the inode lock held. It searches
 * the unused list for freeable inodes, which are collected
 * on a temporary list and then freed by dispose_list.
 *
 * We don't expect to have to call this very often.
 *
 * N.B. The spinlock is not held during the call to
 *      dispose_list.
 */
#define CAN_UNUSE(inode) \
	(((inode)->i_state | (inode)->i_data.nrpages) == 0)
#define INODE(entry)	(list_entry(entry, struct inode, i_list))
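
/*
 * CAN_UNUSE() is true only for inodes with no i_state bits set (not dirty,
 * not locked, not being freed) and no pages left in the page cache; only
 * those are eligible for eviction below.
 */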
void prune_icache(int goal)
{
	LIST_HEAD(list);
	struct list_head *entry, *freeable = &list;
	int count = 0;
	struct inode * inode;

	spin_lock(&inode_lock);
	/* go simple and safe syncing everything before starting */
	sync_all_inodes();

	entry = inode_unused.prev;
	while (entry != &inode_unused)
	{
		struct list_head *tmp = entry;

		entry = entry->prev;
		inode = INODE(tmp);
		if (inode->i_state & (I_FREEING|I_CLEAR))
			BUG();
		if (!CAN_UNUSE(inode))
			continue;
		list_del(tmp);
		list_del(&inode->i_hash);
		INIT_LIST_HEAD(&inode->i_hash);
		list_add(tmp, freeable);
		inode->i_state |= I_FREEING;
		count++;
		if (!--goal)
			break;
	}
	inodes_stat.nr_unused -= count;
	spin_unlock(&inode_lock);

	dispose_list(freeable);
}
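
/*
 * Memory-pressure entry point: compute how many unused inodes to drop,
 * free them via prune_icache(), then ask the slab allocator to return
 * free inode_cachep pages to the page allocator.
 */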
int shrink_icache_memory(int priority, int gfp_mask)
{
	int count = 0;

	if (priority)
		count = inodes_stat.nr_unused / priority;
	prune_icache(count);
	/* FIXME: kmem_cache_shrink here should tell us
	   the number of pages freed, and it should
	   honour __GFP_DMA/__GFP_HIGHMEM so that it frees
	   only the pages that are useful for the current
	   allocation. */
	kmem_cache_shrink(inode_cachep);

	return 0;
}
/*
 * Called with the inode lock held.
 * NOTE: we are not increasing the inode refcount; you must call __iget()
 * by hand after calling find_inode now! This simplifies iunique and avoids
 * an additional branch in the common code.
 */
static struct inode * find_inode(struct super_block * sb, unsigned long ino, struct list_head *head, find_inode_t find_actor, void *opaque)
{
	struct list_head *tmp;
	struct inode * inode;

	tmp = head;
	for (;;) {
		tmp = tmp->next;
		inode = NULL;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_hash);
		if (inode->i_sb != sb)
			continue;
		if (inode->i_ino != ino)
			continue;
		if (find_actor && !find_actor(inode, ino, opaque))
			continue;
		break;
	}
	return inode;
}
/*
 * This just initializes the inode fields
 * to known values before returning the inode..
 *
 * i_sb, i_ino, i_count, i_state and the lists have
 * been initialized elsewhere..
 */
static void clean_inode(struct inode *inode)
{
	static struct address_space_operations empty_aops = {};
	static struct inode_operations empty_iops = {};
	static struct file_operations empty_fops = {};

	memset(&inode->u, 0, sizeof(inode->u));
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	atomic_set(&inode->i_writecount, 0);
	inode->i_generation = 0;
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_data.a_ops = &empty_aops;
	inode->i_data.host = (void*)inode;
	inode->i_mapping = &inode->i_data;
}
/**
 *	get_empty_inode	- obtain an inode
 *
 *	This is called by things like the networking layer
 *	etc that want to get an inode without any inode
 *	number, or filesystems that allocate new inodes with
 *	no pre-existing information.
 *
 *	On a successful return the inode pointer is returned. On a failure
 *	a %NULL pointer is returned. The returned inode is not on any superblock
 *	lists.
 */

struct inode * get_empty_inode(void)
{
	static unsigned long last_ino = 0;
	struct inode * inode;

	inode = alloc_inode();
	if (inode)
	{
		spin_lock(&inode_lock);
		list_add(&inode->i_list, &inode_in_use);
		inode->i_sb = NULL;
		inode->i_dev = 0;
		inode->i_ino = ++last_ino;
		inode->i_flags = 0;
		inode->i_count = 1;
		inode->i_state = 0;
		spin_unlock(&inode_lock);
		clean_inode(inode);
	}
	return inode;
}
/*
 * This is called without the inode lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *	-- rmk@arm.uk.linux.org
 */
static struct inode * get_new_inode(struct super_block *sb, unsigned long ino, struct list_head *head, find_inode_t find_actor, void *opaque)
{
	struct inode * inode;

	inode = alloc_inode();
	if (inode) {
		struct inode * old;

		spin_lock(&inode_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, ino, head, find_actor, opaque);
		if (!old) {
			list_add(&inode->i_list, &inode_in_use);
			list_add(&inode->i_hash, head);
			inode->i_sb = sb;
			inode->i_dev = sb->s_dev;
			inode->i_ino = ino;
			inode->i_flags = 0;
			inode->i_count = 1;
			inode->i_state = I_LOCK;
			spin_unlock(&inode_lock);

			clean_inode(inode);
			sb->s_op->read_inode(inode);

			/*
			 * This is special! We do not need the spinlock
			 * when clearing I_LOCK, because we're guaranteed
			 * that nobody else tries to do anything about the
			 * state of the inode when it is locked, as we
			 * just created it (so there can be no old holders
			 * that haven't tested I_LOCK).
			 */
			inode->i_state &= ~I_LOCK;
			wake_up(&inode->i_wait);

			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		__iget(old);
		spin_unlock(&inode_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
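
/*
 * Hash an (sb, ino) pair into an index into inode_hashtable: the inode
 * number is mixed with the superblock pointer (scaled down by
 * L1_CACHE_BYTES to discard its low alignment bits), folded by
 * I_HASHBITS and masked with I_HASHMASK.
 */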
static inline unsigned long hash(struct super_block *sb, unsigned long i_ino)
{
	unsigned long tmp = i_ino | ((unsigned long) sb / L1_CACHE_BYTES);
	tmp = tmp + (tmp >> I_HASHBITS) + (tmp >> I_HASHBITS*2);
	return tmp & I_HASHMASK;
}

/* Yeah, I know about quadratic hash. Maybe, later. */
/**
 *	iunique - get a unique inode number
 *	@sb: superblock
 *	@max_reserved: highest reserved inode number
 *
 *	Obtain an inode number that is unique on the system for a given
 *	superblock. This is used by file systems that have no natural
 *	permanent inode numbering system. An inode number is returned that
 *	is higher than the reserved limit but unique.
 *
 *	With a large number of inodes live on the file system this function
 *	currently becomes quite slow.
 */

ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	static ino_t counter = 0;
	struct inode *inode;
	struct list_head * head;
	ino_t res;

	spin_lock(&inode_lock);
retry:
	if (counter > max_reserved) {
		head = inode_hashtable + hash(sb,counter);
		inode = find_inode(sb, res = counter++, head, NULL, NULL);
		if (!inode) {
			spin_unlock(&inode_lock);
			return res;
		}
	} else {
		counter = max_reserved + 1;
	}
	goto retry;
}
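
/*
 * Illustrative example (not part of the original source): a synthetic
 * filesystem that has no on-disk inode numbers might allocate one with
 *
 *	inode->i_ino = iunique(sb, 2);
 *
 * keeping numbers 0-2 reserved (e.g. for a root inode) and letting
 * iunique() hand out unused numbers above that.
 */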
struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode_lock);
	if (!(inode->i_state & I_FREEING))
		__iget(inode);
	else
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	spin_unlock(&inode_lock);
	if (inode)
		wait_on_inode(inode);
	return inode;
}
struct inode *iget4(struct super_block *sb, unsigned long ino, find_inode_t find_actor, void *opaque)
{
	struct list_head * head = inode_hashtable + hash(sb,ino);
	struct inode * inode;

	spin_lock(&inode_lock);
	inode = find_inode(sb, ino, head, find_actor, opaque);
	if (inode) {
		__iget(inode);
		spin_unlock(&inode_lock);
		wait_on_inode(inode);
		return inode;
	}
	spin_unlock(&inode_lock);

	/*
	 * get_new_inode() will do the right thing, re-trying the search
	 * in case it had to block at any point.
	 */
	return get_new_inode(sb, ino, head, find_actor, opaque);
}
/**
 *	insert_inode_hash - hash an inode
 *	@inode: unhashed inode
 *
 *	Add an inode to the inode hash for this superblock. If the inode
 *	has no superblock it is added to a separate anonymous chain.
 */

void insert_inode_hash(struct inode *inode)
{
	struct list_head *head = &anon_hash_chain;

	if (inode->i_sb)
		head = inode_hashtable + hash(inode->i_sb, inode->i_ino);
	spin_lock(&inode_lock);
	list_add(&inode->i_hash, head);
	spin_unlock(&inode_lock);
}
/**
 *	remove_inode_hash - remove an inode from the hash
 *	@inode: inode to unhash
 *
 *	Remove an inode from the superblock or anonymous hash.
 */

void remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_lock);
	list_del(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_hash);
	spin_unlock(&inode_lock);
}
/**
 *	iput	- put an inode
 *	@inode: inode to put
 *
 *	Puts an inode, dropping its usage count. If the inode use count hits
 *	zero the inode is also then freed and may be destroyed.
 */

void iput(struct inode *inode)
{
	if (inode) {
		struct super_operations *op = NULL;
		int destroy = 0;

		if (inode->i_sb && inode->i_sb->s_op)
			op = inode->i_sb->s_op;
		if (op && op->put_inode)
			op->put_inode(inode);

		spin_lock(&inode_lock);
		if (!--inode->i_count) {
			if (!inode->i_nlink) {
				list_del(&inode->i_hash);
				INIT_LIST_HEAD(&inode->i_hash);
				list_del(&inode->i_list);
				INIT_LIST_HEAD(&inode->i_list);
				inode->i_state |= I_FREEING;
				spin_unlock(&inode_lock);

				if (inode->i_data.nrpages)
					truncate_inode_pages(&inode->i_data, 0);

				destroy = 1;
				if (op && op->delete_inode) {
					void (*delete)(struct inode *) = op->delete_inode;
					/* s_op->delete_inode internally recalls clear_inode() */
					delete(inode);
				} else
					clear_inode(inode);
				if (inode->i_state != I_CLEAR)
					BUG();

				spin_lock(&inode_lock);
			} else {
				if (!list_empty(&inode->i_hash)) {
					if (!(inode->i_state & I_DIRTY)) {
						list_del(&inode->i_list);
						list_add(&inode->i_list,
							 &inode_unused);
					}
					inodes_stat.nr_unused++;
				} else {
					list_del(&inode->i_list);
					INIT_LIST_HEAD(&inode->i_list);
					inode->i_state |= I_FREEING;
					spin_unlock(&inode_lock);
					clear_inode(inode);
					destroy = 1;
					spin_lock(&inode_lock);
				}
			}
#ifdef INODE_PARANOIA
			if (inode->i_flock)
				printk(KERN_ERR "iput: inode %s/%ld still has locks!\n",
					kdevname(inode->i_dev), inode->i_ino);
			if (!list_empty(&inode->i_dentry))
				printk(KERN_ERR "iput: device %s inode %ld still has aliases!\n",
					kdevname(inode->i_dev), inode->i_ino);
			if (inode->i_count)
				printk(KERN_ERR "iput: device %s inode %ld count changed, count=%d\n",
					kdevname(inode->i_dev), inode->i_ino, inode->i_count);
			if (atomic_read(&inode->i_sem.count) != 1)
				printk(KERN_ERR "iput: Aieee, semaphore in use inode %s/%ld, count=%d\n",
					kdevname(inode->i_dev), inode->i_ino,
					atomic_read(&inode->i_sem.count));
#endif
		}
		if (inode->i_count > (1<<31)) {
			printk(KERN_ERR "iput: inode %s/%ld count wrapped\n",
				kdevname(inode->i_dev), inode->i_ino);
		}
		spin_unlock(&inode_lock);
		if (destroy)
			destroy_inode(inode);
	}
}
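
/*
 * Illustrative example (not part of the original source): every successful
 * iget()/iget4() is balanced by an iput() when the caller is done:
 *
 *	struct inode *inode = iget(sb, ino);
 *	if (inode) {
 *		... use the inode ...
 *		iput(inode);
 *	}
 *
 * Here sb and ino stand for the caller's superblock and inode number.
 */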
/**
 *	bmap	- find a block number in a file
 *	@inode: inode of file
 *	@block: block to find
 *
 *	Returns the block number on the device holding the inode that
 *	is the disk block number for the block of the file requested.
 *	That is, asked for block 4 of inode 1 the function will return the
 *	disk block relative to the disk start that holds that block of the
 *	file.
 */

int bmap(struct inode * inode, int block)
{
	int res = 0;

	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
/*
 * Initialize the hash tables.
 */
void __init inode_init(unsigned long mempages)
{
	struct list_head *head;
	unsigned int nr_hash;
	long order;
	int i;

	mempages >>= (14 - PAGE_SHIFT);
	mempages *= sizeof(struct list_head);
	for (order = 0; ((1UL << order) << PAGE_SHIFT) < mempages; order++)
		;

	do {
		unsigned long tmp;

		nr_hash = (1UL << order) * PAGE_SIZE /
			sizeof(struct list_head);
		i_hash_mask = (nr_hash - 1);

		tmp = nr_hash;
		i_hash_shift = 0;
		while ((tmp >>= 1UL) != 0UL)
			i_hash_shift++;

		inode_hashtable = (struct list_head *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (inode_hashtable == NULL && --order >= 0);

	printk("Inode-cache hash table entries: %d (order: %ld, %ld bytes)\n",
			nr_hash, order, (PAGE_SIZE << order));

	if (!inode_hashtable)
		panic("Failed to allocate inode hash table\n");

	head = inode_hashtable;
	i = nr_hash;
	do {
		INIT_LIST_HEAD(head);
		head++;
		i--;
	} while (i);

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache", sizeof(struct inode),
					 0, SLAB_HWCACHE_ALIGN, init_once,
					 NULL);
	if (!inode_cachep)
		panic("cannot create inode slab cache");
}
/**
 *	update_atime	- update the access time
 *	@inode: inode accessed
 *
 *	Update the accessed time on an inode and mark it for writeback.
 *	This function automatically handles read only file systems and media,
 *	as well as the "noatime" flag and inode specific "noatime" markers.
 */

void update_atime (struct inode *inode)
{
	if ( IS_NOATIME (inode) ) return;
	if ( IS_NODIRATIME (inode) && S_ISDIR (inode->i_mode) ) return;
	if ( IS_RDONLY (inode) ) return;
	inode->i_atime = CURRENT_TIME;
	mark_inode_dirty (inode);
}   /*  End Function update_atime  */
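
/*
 * Illustrative example (not part of the original source): read paths simply
 * call
 *
 *	update_atime(inode);
 *
 * after a successful access; the checks above make it a no-op on read-only
 * or "noatime" mounts.
 */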
/*
 * Quota functions that want to walk the inode lists..
 */

/* Functions back in dquot.c */
void put_dquot_list(struct list_head *);
int remove_inode_dquot_ref(struct inode *, short, struct list_head *);

void remove_dquot_ref(kdev_t dev, short type)
{
	struct super_block *sb = get_super(dev);
	struct inode *inode;
	struct list_head *act_head;
	LIST_HEAD(tofree_head);

	if (!sb || !sb->dq_op)
		return;	/* nothing to do */

	/* We have to be protected against other CPUs */
	spin_lock(&inode_lock);

	for (act_head = inode_in_use.next; act_head != &inode_in_use; act_head = act_head->next) {
		inode = list_entry(act_head, struct inode, i_list);
		if (inode->i_sb != sb || !IS_QUOTAINIT(inode))
			continue;
		remove_inode_dquot_ref(inode, type, &tofree_head);
	}
	for (act_head = inode_unused.next; act_head != &inode_unused; act_head = act_head->next) {
		inode = list_entry(act_head, struct inode, i_list);
		if (inode->i_sb != sb || !IS_QUOTAINIT(inode))
			continue;
		remove_inode_dquot_ref(inode, type, &tofree_head);
	}
	for (act_head = sb->s_dirty.next; act_head != &sb->s_dirty; act_head = act_head->next) {
		inode = list_entry(act_head, struct inode, i_list);
		if (!IS_QUOTAINIT(inode))
			continue;
		remove_inode_dquot_ref(inode, type, &tofree_head);
	}
	spin_unlock(&inode_lock);

	put_dquot_list(&tofree_head);
}