/*
 * (C) 1997 Linus Torvalds
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/quotaops.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/ima.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/inotify.h>
#include <linux/mount.h>
#include <linux/async.h>
/*
 * This is needed for the following functions:
 *  - invalidate_inode_buffers
 *
 * FIXME: remove all knowledge of the buffer layer from this file
 */
#include <linux/buffer_head.h>
/*
 * New inode.c implementation.
 *
 * This implementation has the basic premise of trying
 * to be extremely low-overhead and SMP-safe, yet be
 * simple enough to be "obviously correct".
 */

/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */

/* #define INODE_PARANOIA 1 */
/* #define INODE_DEBUG 1 */
/*
 * Inode lookup is no longer as critical as it used to be:
 * most of the lookups are going to be through the dcache.
 */
#define I_HASHBITS	i_hash_shift
#define I_HASHMASK	i_hash_mask

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
/*
 * Each inode can be on two separate lists. One is
 * the hash list of the inode, used for lookups. The
 * other linked list is the "type" list:
 *  "in_use" - valid inode, i_count > 0, i_nlink > 0
 *  "dirty"  - as "in_use" but also dirty
 *  "unused" - valid inode, i_count = 0
 *
 * A "dirty" list is maintained for each super block,
 * allowing for low-overhead inode sync() operations.
 */

LIST_HEAD(inode_in_use);
LIST_HEAD(inode_unused);
static struct hlist_head *inode_hashtable __read_mostly;
/*
 * A simple spinlock to protect the list manipulations.
 *
 * NOTE! You also have to own the lock if you change
 * the i_state of an inode while it is in use..
 */
DEFINE_SPINLOCK(inode_lock);
/*
 * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
 * icache shrinking path, and the umount path.  Without this exclusion,
 * by the time prune_icache calls iput for the inode whose pages it has
 * been invalidating, or by the time it calls clear_inode & destroy_inode
 * from its final dispose_list, the struct super_block they refer to
 * (for inode->i_sb->s_op) may already have been freed and reused.
 */
static DEFINE_MUTEX(iprune_mutex);
/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static struct kmem_cache *inode_cachep __read_mostly;
static void wake_up_inode(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_LOCK);
}
/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
struct inode *inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct address_space_operations empty_aops;
	static struct inode_operations empty_iops;
	static const struct file_operations empty_fops;

	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	atomic_set(&inode->i_writecount, 0);
	inode->i_generation = 0;
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out_free_inode;

	/* allocate and initialize an i_integrity */
	if (ima_inode_alloc(inode))
		goto out_free_security;

	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	init_rwsem(&inode->i_alloc_sem);
	lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->assoc_mapping = NULL;
	mapping->backing_dev_info = &default_backing_dev_info;
	mapping->writeback_index = 0;

	/*
	 * If the block_device provides a backing_dev_info for client
	 * inodes then use that.  Otherwise the inode shares the bdev's
	 * backing_dev_info.
	 */
	if (sb->s_bdev) {
		struct backing_dev_info *bdi;

		bdi = sb->s_bdev->bd_inode_backing_dev_info;
		if (!bdi)
			bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
		mapping->backing_dev_info = bdi;
	}
	inode->i_private = NULL;
	inode->i_mapping = mapping;

	return inode;

out_free_security:
	security_inode_free(inode);
out_free_inode:
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		kmem_cache_free(inode_cachep, inode);
	return NULL;
}
EXPORT_SYMBOL(inode_init_always);
static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (inode)
		return inode_init_always(sb, inode);
	return NULL;
}
void destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(destroy_inode);
/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
	spin_lock_init(&inode->i_data.tree_lock);
	spin_lock_init(&inode->i_data.i_mmap_lock);
	INIT_LIST_HEAD(&inode->i_data.private_list);
	spin_lock_init(&inode->i_data.private_lock);
	INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
	i_size_ordered_init(inode);
#ifdef CONFIG_INOTIFY
	INIT_LIST_HEAD(&inode->inotify_watches);
	mutex_init(&inode->inotify_mutex);
#endif
}
EXPORT_SYMBOL(inode_init_once);
static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}
/*
 * inode_lock must be held
 */
void __iget(struct inode *inode)
{
	if (atomic_read(&inode->i_count)) {
		atomic_inc(&inode->i_count);
		return;
	}
	atomic_inc(&inode->i_count);
	if (!(inode->i_state & (I_DIRTY|I_SYNC)))
		list_move(&inode->i_list, &inode_in_use);
	inodes_stat.nr_unused--;
}
/**
 * clear_inode - clear an inode
 * @inode: inode to clear
 *
 * This is called by the filesystem to tell us
 * that the inode is no longer useful. We just
 * terminate it with extreme prejudice.
 */
void clear_inode(struct inode *inode)
{
	invalidate_inode_buffers(inode);

	BUG_ON(inode->i_data.nrpages);
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	inode_sync_wait(inode);
	if (inode->i_sb->s_op->clear_inode)
		inode->i_sb->s_op->clear_inode(inode);
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);
	inode->i_state = I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);
/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	int nr_disposed = 0;

	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_list);
		list_del(&inode->i_list);

		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);

		spin_lock(&inode_lock);
		hlist_del_init(&inode->i_hash);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_lock);

		wake_up_inode(inode);
		destroy_inode(inode);
		nr_disposed++;
	}
	spin_lock(&inode_lock);
	inodes_stat.nr_inodes -= nr_disposed;
	spin_unlock(&inode_lock);
}
/*
 * Invalidate all inodes for a device.
 */
static int invalidate_list(struct list_head *head, struct list_head *dispose)
{
	struct list_head *next;
	int busy = 0, count = 0;

	next = head->next;
	for (;;) {
		struct list_head *tmp = next;
		struct inode *inode;

		/*
		 * We can reschedule here without worrying about the list's
		 * consistency because the per-sb list of inodes must not
		 * change during umount anymore, and because iprune_mutex keeps
		 * shrink_icache_memory() away.
		 */
		cond_resched_lock(&inode_lock);

		next = next->next;
		if (tmp == head)
			break;
		inode = list_entry(tmp, struct inode, i_sb_list);
		if (inode->i_state & I_NEW)
			continue;
		invalidate_inode_buffers(inode);
		if (!atomic_read(&inode->i_count)) {
			list_move(&inode->i_list, dispose);
			WARN_ON(inode->i_state & I_NEW);
			inode->i_state |= I_FREEING;
			count++;
			continue;
		}
		busy = 1;
	}
	/* only unused inodes may be cached with i_count zero */
	inodes_stat.nr_unused -= count;
	return busy;
}
/**
 * invalidate_inodes - discard the inodes on a device
 * @sb: superblock
 *
 * Discard all of the inodes for a given superblock. If the discard
 * fails because there are busy inodes then a non zero value is returned.
 * If the discard is successful all the inodes have been discarded.
 */
int invalidate_inodes(struct super_block *sb)
{
	int busy;
	LIST_HEAD(throw_away);

	mutex_lock(&iprune_mutex);
	spin_lock(&inode_lock);
	inotify_unmount_inodes(&sb->s_inodes);
	busy = invalidate_list(&sb->s_inodes, &throw_away);
	spin_unlock(&inode_lock);

	dispose_list(&throw_away);
	mutex_unlock(&iprune_mutex);

	return busy;
}
EXPORT_SYMBOL(invalidate_inodes);
static int can_unuse(struct inode *inode)
{
	if (inode->i_state)
		return 0;
	if (inode_has_buffers(inode))
		return 0;
	if (atomic_read(&inode->i_count))
		return 0;
	if (inode->i_data.nrpages)
		return 0;
	return 1;
}
/*
 * Scan `goal' inodes on the unused list for freeable ones. They are moved to
 * a temporary list and then are freed outside inode_lock by dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  We expect the final iput() on that inode to add it to
 * the front of the inode_unused list.  So look for it there and if the
 * inode is still freeable, proceed.  The right inode is found 99.9% of the
 * time in testing on a 4-way.
 *
 * If the inode has metadata buffers attached to mapping->private_list then
 * try to remove them.
 */
static void prune_icache(int nr_to_scan)
{
	LIST_HEAD(freeable);
	int nr_pruned = 0;
	int nr_scanned;
	unsigned long reap = 0;

	mutex_lock(&iprune_mutex);
	spin_lock(&inode_lock);
	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
		struct inode *inode;

		if (list_empty(&inode_unused))
			break;

		inode = list_entry(inode_unused.prev, struct inode, i_list);

		if (inode->i_state || atomic_read(&inode->i_count)) {
			list_move(&inode->i_list, &inode_unused);
			continue;
		}
		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
			__iget(inode);
			spin_unlock(&inode_lock);
			if (remove_inode_buffers(inode))
				reap += invalidate_mapping_pages(&inode->i_data,
								0, -1);
			iput(inode);
			spin_lock(&inode_lock);

			if (inode != list_entry(inode_unused.next,
						struct inode, i_list))
				continue;	/* wrong inode or list_empty */
			if (!can_unuse(inode))
				continue;
		}
		list_move(&inode->i_list, &freeable);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state |= I_FREEING;
		nr_pruned++;
	}
	inodes_stat.nr_unused -= nr_pruned;
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_INODESTEAL, reap);
	else
		__count_vm_events(PGINODESTEAL, reap);
	spin_unlock(&inode_lock);

	dispose_list(&freeable);
	mutex_unlock(&iprune_mutex);
}
/*
 * shrink_icache_memory() will attempt to reclaim some unused inodes.  Here,
 * "unused" means that no dentries are referring to the inodes: the files are
 * not open and the dcache references to those inodes have already been
 * reclaimed.
 *
 * This function is passed the number of inodes to scan, and it returns the
 * total number of remaining possibly-reclaimable inodes.
 */
static int shrink_icache_memory(int nr, gfp_t gfp_mask)
{
	if (nr) {
		/*
		 * Nasty deadlock avoidance.  We may hold various FS locks,
		 * and we don't want to recurse into the FS that called us
		 * in clear_inode() and friends..
		 */
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_icache(nr);
	}
	return (inodes_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker icache_shrinker = {
	.shrink = shrink_icache_memory,
	.seeks = DEFAULT_SEEKS,
};
static void __wait_on_freeing_inode(struct inode *inode);

/*
 * Called with the inode lock held.
 * NOTE: we are not increasing the inode-refcount, you must call __iget()
 * by hand after calling find_inode now! This simplifies iunique and won't
 * add any additional branch in the common code.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		if (inode->i_sb != sb)
			continue;
		if (!test(inode, data))
			continue;
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		break;
	}
	return node ? inode : NULL;
}
/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		if (inode->i_ino != ino)
			continue;
		if (inode->i_sb != sb)
			continue;
		if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		break;
	}
	return node ? inode : NULL;
}
static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS);
	return tmp & I_HASHMASK;
}
static inline void
__inode_add_to_lists(struct super_block *sb, struct hlist_head *head,
			struct inode *inode)
{
	inodes_stat.nr_inodes++;
	list_add(&inode->i_list, &inode_in_use);
	list_add(&inode->i_sb_list, &sb->s_inodes);
	if (head)
		hlist_add_head(&inode->i_hash, head);
}
/**
 * inode_add_to_lists - add a new inode to relevant lists
 * @sb: superblock inode belongs to
 * @inode: inode to mark in use
 *
 * When an inode is allocated it needs to be accounted for, added to the in use
 * list, the owning superblock and the inode hash. This needs to be done under
 * the inode_lock, so export a function to do this rather than the inode lock
 * itself. We calculate the hash list to add to here so it is all internal
 * which requires the caller to have already set up the inode number in the
 * inode to add.
 */
void inode_add_to_lists(struct super_block *sb, struct inode *inode)
{
	struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino);

	spin_lock(&inode_lock);
	__inode_add_to_lists(sb, head, inode);
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL_GPL(inode_add_to_lists);
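
/*
 * Example (an illustrative sketch, not part of the original file; the
 * myfs_* names are hypothetical): a filesystem that allocates inodes
 * out of its own pre-initialised buffers rather than via new_inode()
 * would account them afterwards.  Note that i_ino must already be set,
 * since the hash bucket is derived from it:
 *
 *	struct inode *inode = myfs_inode_from_cluster(sb, cluster);
 *	inode->i_ino = myfs_ino_of(cluster);
 *	inode_add_to_lists(sb, inode);
 */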
/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static unsigned int last_ino;
	struct inode *inode;

	spin_lock_prefetch(&inode_lock);

	inode = alloc_inode(sb);
	if (inode) {
		spin_lock(&inode_lock);
		__inode_add_to_lists(sb, NULL, inode);
		inode->i_ino = ++last_ino;
		inode->i_state = 0;
		spin_unlock(&inode_lock);
	}
	return inode;
}
EXPORT_SYMBOL(new_inode);
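
/*
 * Example (illustrative sketch): as the comment above says, a
 * filesystem whose page cache pages must not come from HIGHMEM or be
 * migrated can override the default mask right after allocation:
 *
 *	struct inode *inode = new_inode(sb);
 *	if (inode)
 *		mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
 */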
void unlock_new_inode(struct inode *inode)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (inode->i_mode & S_IFDIR) {
		struct file_system_type *type = inode->i_sb->s_type;

		/*
		 * ensure nobody is actually holding i_mutex
		 */
		mutex_destroy(&inode->i_mutex);
		mutex_init(&inode->i_mutex);
		lockdep_set_class(&inode->i_mutex, &type->i_mutex_dir_key);
	}
#endif
	/*
	 * This is special!  We do not need the spinlock
	 * when clearing I_LOCK, because we're guaranteed
	 * that nobody else tries to do anything about the
	 * state of the inode when it is locked, as we
	 * just created it (so there can be no old holders
	 * that haven't tested I_LOCK).
	 */
	WARN_ON((inode->i_state & (I_LOCK|I_NEW)) != (I_LOCK|I_NEW));
	inode->i_state &= ~(I_LOCK|I_NEW);
	wake_up_inode(inode);
}
EXPORT_SYMBOL(unlock_new_inode);
/*
 * This is called without the inode lock held.. Be careful.
 *
 * We no longer cache the sb_flags in i_flags - see fs.h
 *	-- rmk@arm.uk.linux.org
 */
static struct inode *get_new_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				int (*set)(struct inode *, void *),
				void *data)
{
	struct inode *inode;

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			__inode_add_to_lists(sb, head, inode);
			inode->i_state = I_LOCK|I_NEW;
			spin_unlock(&inode_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		__iget(old);
		spin_unlock(&inode_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_lock);
	destroy_inode(inode);
	return NULL;
}
/*
 * get_new_inode_fast is the fast path version of get_new_inode, see the
 * comment at iget_locked for details.
 */
static struct inode *get_new_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct inode *inode;

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			__inode_add_to_lists(sb, head, inode);
			inode->i_state = I_LOCK|I_NEW;
			spin_unlock(&inode_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		__iget(old);
		spin_unlock(&inode_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static unsigned int counter;
	struct inode *inode;
	struct hlist_head *head;
	ino_t res;

	spin_lock(&inode_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
		head = inode_hashtable + hash(sb, res);
		inode = find_inode_fast(sb, head, res);
	} while (inode != NULL);
	spin_unlock(&inode_lock);

	return res;
}
EXPORT_SYMBOL(iunique);
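
/*
 * Example (illustrative sketch; MYFS_MAX_RESERVED is a hypothetical
 * constant): a synthetic filesystem with no permanent inode numbers
 * picks one at create time:
 *
 *	struct inode *inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = iunique(sb, MYFS_MAX_RESERVED);
 */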
struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode_lock);
	if (!(inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)))
		__iget(inode);
	else
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	spin_unlock(&inode_lock);
	return inode;
}
EXPORT_SYMBOL(igrab);
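
/*
 * Example (illustrative sketch): a caller holding only an unreferenced
 * inode pointer must use igrab() to pin it; a NULL return means the
 * inode was already on its way out:
 *
 *	struct inode *pinned = igrab(inode);
 *	if (!pinned)
 *		return -ESTALE;
 *	... use pinned ...
 *	iput(pinned);
 */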
/**
 * ifind - internal function, you want ilookup5() or iget5().
 * @sb: super block of file system to search
 * @head: the head of the list to search
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 * @wait: if true wait for the inode to be unlocked, if false do not
 *
 * ifind() searches for the inode specified by @data in the inode
 * cache. This is a generalized version of ifind_fast() for file systems where
 * the inode number is not sufficient for unique identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
static struct inode *ifind(struct super_block *sb,
		struct hlist_head *head, int (*test)(struct inode *, void *),
		void *data, const int wait)
{
	struct inode *inode;

	spin_lock(&inode_lock);
	inode = find_inode(sb, head, test, data);
	if (inode) {
		__iget(inode);
		spin_unlock(&inode_lock);
		if (likely(wait))
			wait_on_inode(inode);
		return inode;
	}
	spin_unlock(&inode_lock);
	return NULL;
}
/**
 * ifind_fast - internal function, you want ilookup() or iget().
 * @sb: super block of file system to search
 * @head: head of the list to search
 * @ino: inode number to search for
 *
 * ifind_fast() searches for the inode @ino in the inode cache. This is for
 * file systems where the inode number is sufficient for unique identification
 * of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 */
static struct inode *ifind_fast(struct super_block *sb,
		struct hlist_head *head, unsigned long ino)
{
	struct inode *inode;

	spin_lock(&inode_lock);
	inode = find_inode_fast(sb, head, ino);
	if (inode) {
		__iget(inode);
		spin_unlock(&inode_lock);
		wait_on_inode(inode);
		return inode;
	}
	spin_unlock(&inode_lock);
	return NULL;
}
/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * ilookup5_nowait() uses ifind() to search for the inode specified by
 * @hashval and @data in the inode cache. This is a generalized version of
 * ilookup() for file systems where the inode number is not sufficient for
 * unique identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count. Note, the inode lock is not waited upon so you have to be
 * very careful what you do with the returned inode. You probably should be
 * using ilookup5() instead.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	return ifind(sb, head, test, data, 0);
}
EXPORT_SYMBOL(ilookup5_nowait);
/**
 * ilookup5 - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * ilookup5() uses ifind() to search for the inode specified by @hashval and
 * @data in the inode cache. This is a generalized version of ilookup() for
 * file systems where the inode number is not sufficient for unique
 * identification of an inode.
 *
 * If the inode is in the cache, the inode lock is waited upon and the inode is
 * returned with an incremented reference count.
 *
 * Otherwise NULL is returned.
 *
 * Note, @test is called with the inode_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	return ifind(sb, head, test, data, 1);
}
EXPORT_SYMBOL(ilookup5);
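
/*
 * Example (illustrative sketch; the myfs_* names and the MYFS_I()
 * wrapper are hypothetical): looking up a cached inode keyed by an
 * on-disk object id rather than by i_ino alone:
 *
 *	static int myfs_test(struct inode *inode, void *data)
 *	{
 *		return MYFS_I(inode)->object_id == *(u64 *)data;
 *	}
 *
 *	inode = ilookup5(sb, (unsigned long)object_id, myfs_test,
 *			 &object_id);
 *	if (inode) {
 *		... reference already held, iput() when done ...
 *	}
 */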
/**
 * ilookup - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @ino: inode number to search for
 *
 * ilookup() uses ifind_fast() to search for the inode @ino in the inode cache.
 * This is for file systems where the inode number is sufficient for unique
 * identification of an inode.
 *
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Otherwise NULL is returned.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	return ifind_fast(sb, head, ino);
}
EXPORT_SYMBOL(ilookup);
/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * iget5_locked() uses ifind() to search for the inode specified by @hashval
 * and @data in the inode cache and if present it is returned with an increased
 * reference count. This is a generalized version of iget_locked() for file
 * systems where the inode number is not sufficient for unique identification
 * of an inode.
 *
 * If the inode is not in cache, get_new_inode() is called to allocate a new
 * inode and this is returned locked, hashed, and with the I_NEW flag set. The
 * file system gets to fill it in before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_lock held, so can't sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	inode = ifind(sb, head, test, data, 1);
	if (inode)
		return inode;
	/*
	 * get_new_inode() will do the right thing, re-trying the search
	 * in case it had to block at any point.
	 */
	return get_new_inode(sb, head, test, set, data);
}
EXPORT_SYMBOL(iget5_locked);
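
/*
 * Example (illustrative sketch; every myfs_* name is hypothetical):
 * the canonical pattern for filesystems whose inodes are not uniquely
 * identified by i_ino, with @set initialising the key on a new inode
 * and @test matching it on a cached one:
 *
 *	inode = iget5_locked(sb, hashval, myfs_test, myfs_set, &key);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (inode->i_state & I_NEW) {
 *		myfs_read_inode(inode);		(fill from backing store)
 *		unlock_new_inode(inode);
 *	}
 *	return inode;
 */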
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @ino: inode number to get
 *
 * iget_locked() uses ifind_fast() to search for the inode specified by @ino in
 * the inode cache and if present it is returned with an increased reference
 * count. This is for file systems where the inode number is sufficient for
 * unique identification of an inode.
 *
 * If the inode is not in cache, get_new_inode_fast() is called to allocate a
 * new inode and this is returned locked, hashed, and with the I_NEW flag set.
 * The file system gets to fill it in before unlocking it via
 * unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	inode = ifind_fast(sb, head, ino);
	if (inode)
		return inode;
	/*
	 * get_new_inode_fast() will do the right thing, re-trying the search
	 * in case it had to block at any point.
	 */
	return get_new_inode_fast(sb, head, ino);
}
EXPORT_SYMBOL(iget_locked);
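
/*
 * Example (illustrative sketch; the myfs_* names are hypothetical): a
 * typical iget helper for a filesystem where i_ino alone identifies
 * the inode:
 *
 *	struct inode *myfs_iget(struct super_block *sb, unsigned long ino)
 *	{
 *		struct inode *inode = iget_locked(sb, ino);
 *
 *		if (!inode)
 *			return ERR_PTR(-ENOMEM);
 *		if (inode->i_state & I_NEW) {
 *			myfs_read_inode(inode);
 *			unlock_new_inode(inode);
 *		}
 *		return inode;
 *	}
 */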
int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *old;

	inode->i_state |= I_LOCK|I_NEW;
	while (1) {
		spin_lock(&inode_lock);
		old = find_inode_fast(sb, head, ino);
		if (likely(!old)) {
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&inode_lock);
		wait_on_inode(old);
		if (unlikely(!hlist_unhashed(&old->i_hash))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);
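
/*
 * Example (illustrative sketch): an inode-creation path uses
 * insert_inode_locked() to publish a freshly allocated inode in the
 * hash while detecting a concurrent live inode with the same number:
 *
 *	inode->i_ino = ino;
 *	if (insert_inode_locked(inode) < 0) {
 *		... a live inode with this number already exists ...
 *		iput(inode);
 *		return ERR_PTR(-EBUSY);
 *	}
 */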
int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *old;

	inode->i_state |= I_LOCK|I_NEW;
	while (1) {
		spin_lock(&inode_lock);
		old = find_inode(sb, head, test, data);
		if (likely(!old)) {
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&inode_lock);
		wait_on_inode(old);
		if (unlikely(!hlist_unhashed(&old->i_hash))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);
/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_lock);
	hlist_add_head(&inode->i_hash, head);
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);
/**
 * remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(remove_inode_hash);
/*
 * Tell the filesystem that this inode is no longer of any interest and should
 * be completely destroyed.
 *
 * We leave the inode in the inode hash table until *after* the filesystem's
 * ->delete_inode completes.  This ensures that an iget (such as nfsd might
 * instigate) will always find up-to-date information either in the hash or on
 * disk.
 *
 * I_FREEING is set so that no-one will take a new reference to the inode while
 * it is being deleted.
 */
void generic_delete_inode(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	list_del_init(&inode->i_list);
	list_del_init(&inode->i_sb_list);
	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	inodes_stat.nr_inodes--;
	spin_unlock(&inode_lock);

	security_inode_delete(inode);

	if (op->delete_inode) {
		void (*delete)(struct inode *) = op->delete_inode;
		if (!is_bad_inode(inode))
			vfs_dq_init(inode);
		/* Filesystems implementing their own
		 * s_op->delete_inode are required to call
		 * truncate_inode_pages and clear_inode()
		 * internally */
		delete(inode);
	} else {
		truncate_inode_pages(&inode->i_data, 0);
		clear_inode(inode);
	}
	spin_lock(&inode_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode_lock);
	wake_up_inode(inode);
	BUG_ON(inode->i_state != I_CLEAR);
	destroy_inode(inode);
}
EXPORT_SYMBOL(generic_delete_inode);
static void generic_forget_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (!hlist_unhashed(&inode->i_hash)) {
		if (!(inode->i_state & (I_DIRTY|I_SYNC)))
			list_move(&inode->i_list, &inode_unused);
		inodes_stat.nr_unused++;
		if (sb->s_flags & MS_ACTIVE) {
			spin_unlock(&inode_lock);
			return;
		}
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
		inodes_stat.nr_unused--;
		hlist_del_init(&inode->i_hash);
	}
	list_del_init(&inode->i_list);
	list_del_init(&inode->i_sb_list);
	WARN_ON(inode->i_state & I_NEW);
	inode->i_state |= I_FREEING;
	inodes_stat.nr_inodes--;
	spin_unlock(&inode_lock);
	if (inode->i_data.nrpages)
		truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);
	wake_up_inode(inode);
	destroy_inode(inode);
}
/*
 * Normal UNIX filesystem behaviour: delete the
 * inode when the usage count drops to zero, and
 * i_nlink is zero.
 */
void generic_drop_inode(struct inode *inode)
{
	if (!inode->i_nlink)
		generic_delete_inode(inode);
	else
		generic_forget_inode(inode);
}
EXPORT_SYMBOL_GPL(generic_drop_inode);
/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop()" function, defaulting to
 * the legacy UNIX filesystem behaviour..
 *
 * NOTE! NOTE! NOTE! We're called with the inode lock
 * held, and the drop function is supposed to release
 * the lock!
 */
static inline void iput_final(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;
	void (*drop)(struct inode *) = generic_drop_inode;

	if (op && op->drop_inode)
		drop = op->drop_inode;
	drop(inode);
}
/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (inode) {
		BUG_ON(inode->i_state == I_CLEAR);

		if (atomic_dec_and_lock(&inode->i_count, &inode_lock))
			iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);
/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;

	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);
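
/*
 * Example (illustrative): this is the helper behind the FIBMAP ioctl.
 * A zero result means the block is a hole, or the filesystem provides
 * no ->bmap at all:
 *
 *	sector_t phys = bmap(inode, 4);
 *	if (!phys)
 *		... unmapped, or mapping not supported ...
 */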
/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
			     struct timespec now)
{
	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}
/**
 * touch_atime - update the access time
 * @mnt: mount the inode is accessed on
 * @dentry: dentry accessed
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct timespec now;

	if (mnt_want_write(mnt))
		return;
	if (inode->i_flags & S_NOATIME)
		goto out;
	if (IS_NOATIME(inode))
		goto out;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		goto out;

	if (mnt->mnt_flags & MNT_NOATIME)
		goto out;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		goto out;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		goto out;

	if (timespec_equal(&inode->i_atime, &now))
		goto out;

	inode->i_atime = now;
	mark_inode_dirty_sync(inode);
out:
	mnt_drop_write(mnt);
}
EXPORT_SYMBOL(touch_atime);
/**
 * file_update_time - update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode
 * for writeback.  Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore update via this function with the
 * S_NOCMTIME inode flag, e.g. for network filesystems where these
 * timestamps are handled by the server.
 */
void file_update_time(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct timespec now;
	int sync_it = 0;
	int err;

	if (IS_NOCMTIME(inode))
		return;

	err = mnt_want_write(file->f_path.mnt);
	if (err)
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now)) {
		inode->i_mtime = now;
		sync_it = 1;
	}

	if (!timespec_equal(&inode->i_ctime, &now)) {
		inode->i_ctime = now;
		sync_it = 1;
	}

	if (IS_I_VERSION(inode)) {
		inode_inc_iversion(inode);
		sync_it = 1;
	}

	if (sync_it)
		mark_inode_dirty_sync(inode);
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(file_update_time);
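
/*
 * Example (illustrative sketch): a filesystem's write path calls this
 * once it knows the write will go ahead, before dirtying pages:
 *
 *	file_update_time(file);
 *	... copy data into the page cache, mark pages dirty ...
 */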
int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);
int inode_wait(void *word)
{
	schedule();
	return 0;
}
EXPORT_SYMBOL(inode_wait);
/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_LOCK is not set initially, a call to
 * wake_up_inode() after removing from the hash list will DTRT.
 *
 * This is called with inode_lock held.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);

	wq = bit_waitqueue(&inode->i_state, __I_LOCK);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_lock);
}
static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);
/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}
void __init inode_init(void)
{
	int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);
	register_shrinker(&icache_shrinker);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}
void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &def_fifo_fops;
	else if (S_ISSOCK(mode))
		inode->i_fop = &bad_sock_fops;
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o)\n",
		       mode);
}
EXPORT_SYMBOL(init_special_inode);
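
/*
 * Example (illustrative sketch; the myfs_* names are hypothetical): a
 * filesystem's ->mknod method hands device, FIFO and socket inodes to
 * this helper so the right default file_operations get wired up:
 *
 *	static int myfs_mknod(struct inode *dir, struct dentry *dentry,
 *			      int mode, dev_t rdev)
 *	{
 *		struct inode *inode = myfs_new_inode(dir, mode);
 *
 *		if (!inode)
 *			return -ENOSPC;
 *		init_special_inode(inode, inode->i_mode, rdev);
 *		myfs_write_inode_to_disk(inode);
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */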