/*
 * fs/inode.c
 *
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/async.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/ima.h>
#include <linux/cred.h>
#include <linux/buffer_head.h>	/* for inode_has_buffers */
#include "internal.h"
/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * inode_lru_lock protects:
 *   inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * inode_wb_list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     inode_lru_lock
 *
 * inode_wb_list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
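/*
 * Illustrative sketch (not part of the original file): the ordering
 * above means that code walking the per-sb inode list nests i_lock
 * inside inode_sb_list_lock, as evict_inodes() below does:
 *
 *	spin_lock(&inode_sb_list_lock);
 *	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 *		spin_lock(&inode->i_lock);
 *		...
 *		spin_unlock(&inode->i_lock);
 *	}
 *	spin_unlock(&inode_sb_list_lock);
 */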
static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

static LIST_HEAD(inode_lru);
static DEFINE_SPINLOCK(inode_lru_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
/*
 * iprune_sem provides exclusion between the icache shrinking and the
 * umount path.
 *
 * We don't actually need it to protect anything in the umount path,
 * but only need to cycle through it to make sure any inode that
 * prune_icache took off the LRU list has been fully torn down by the
 * time we are past evict_inodes.
 */
static DECLARE_RWSEM(iprune_sem);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned int, nr_inodes);

static struct kmem_cache *inode_cachep __read_mostly;
static int get_nr_inodes(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline int get_nr_inodes_unused(void)
{
	return inodes_stat.nr_unused;
}

int get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif
/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations empty_fops;
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	inode->i_nlink = 1;
	inode->i_uid = 0;
	inode->i_gid = 0;
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
#ifdef CONFIG_QUOTA
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	init_rwsem(&inode->i_alloc_sem);
	lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->assoc_mapping = NULL;
	mapping->backing_dev_info = &default_backing_dev_info;
	mapping->writeback_index = 0;

	/*
	 * If the block_device provides a backing_dev_info for client
	 * inodes then use that.  Otherwise the inode shares the bdev's
	 * backing_dev_info.
	 */
	if (sb->s_bdev) {
		struct backing_dev_info *bdi;

		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
		mapping->backing_dev_info = bdi;
	}
	inode->i_private = NULL;
	inode->i_mapping = mapping;
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif

	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);
static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}
void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}
void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	mutex_init(&mapping->i_mmap_mutex);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab cache be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}
/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);
static void inode_lru_list_add(struct inode *inode)
{
	spin_lock(&inode_lru_lock);
	if (list_empty(&inode->i_lru)) {
		list_add(&inode->i_lru, &inode_lru);
		inodes_stat.nr_unused++;
	}
	spin_unlock(&inode_lru_lock);
}

static void inode_lru_list_del(struct inode *inode)
{
	spin_lock(&inode_lru_lock);
	if (!list_empty(&inode->i_lru)) {
		list_del_init(&inode->i_lru);
		inodes_stat.nr_unused--;
	}
	spin_unlock(&inode_lru_lock);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_del_init(&inode->i_sb_list);
	spin_unlock(&inode_sb_list_lock);
}
static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}
/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *		inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 * remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(remove_inode_hash);
void end_writeback(struct inode *inode)
{
	might_sleep();
	BUG_ON(inode->i_data.nrpages);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	inode_sync_wait(inode);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(end_writeback);
/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	inode_wb_list_del(inode);
	inode_sb_list_del(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		end_writeback(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}
/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
	}
}
/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained.  This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	/*
	 * Cycle through iprune_sem to make sure any inode that prune_icache
	 * moved off the list before we took the lock has been fully torn
	 * down.
	 */
	down_write(&iprune_sem);
	up_write(&iprune_sem);
}
/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock.  If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	return busy;
}
static int can_unuse(struct inode *inode)
{
	if (inode->i_state & ~I_REFERENCED)
		return 0;
	if (inode_has_buffers(inode))
		return 0;
	if (atomic_read(&inode->i_count))
		return 0;
	if (inode->i_data.nrpages)
		return 0;
	return 1;
}
/*
 * Scan `goal' inodes on the unused list for freeable ones. They are moved to a
 * temporary list and then are freed outside inode_lru_lock by dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed.  If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static void prune_icache(int nr_to_scan)
{
	LIST_HEAD(freeable);
	int nr_scanned;
	unsigned long reap = 0;

	down_read(&iprune_sem);
	spin_lock(&inode_lru_lock);
	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
		struct inode *inode;

		if (list_empty(&inode_lru))
			break;

		inode = list_entry(inode_lru.prev, struct inode, i_lru);

		/*
		 * we are inverting the inode_lru_lock/inode->i_lock here,
		 * so use a trylock. If we fail to get the lock, just move the
		 * inode to the back of the list so we don't spin on it.
		 */
		if (!spin_trylock(&inode->i_lock)) {
			list_move(&inode->i_lru, &inode_lru);
			continue;
		}

		/*
		 * Referenced or dirty inodes are still in use. Give them
		 * another pass through the LRU as we cannot reclaim them now.
		 */
		if (atomic_read(&inode->i_count) ||
		    (inode->i_state & ~I_REFERENCED)) {
			list_del_init(&inode->i_lru);
			spin_unlock(&inode->i_lock);
			inodes_stat.nr_unused--;
			continue;
		}

		/* recently referenced inodes get one more pass */
		if (inode->i_state & I_REFERENCED) {
			inode->i_state &= ~I_REFERENCED;
			list_move(&inode->i_lru, &inode_lru);
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
			__iget(inode);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_lru_lock);
			if (remove_inode_buffers(inode))
				reap += invalidate_mapping_pages(&inode->i_data,
								0, -1);
			iput(inode);
			spin_lock(&inode_lru_lock);

			if (inode != list_entry(inode_lru.next,
						struct inode, i_lru))
				continue;	/* wrong inode or list_empty */
			/* avoid lock inversions with trylock */
			if (!spin_trylock(&inode->i_lock))
				continue;
			if (!can_unuse(inode)) {
				spin_unlock(&inode->i_lock);
				continue;
			}
		}
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state |= I_FREEING;
		spin_unlock(&inode->i_lock);

		list_move(&inode->i_lru, &freeable);
		inodes_stat.nr_unused--;
	}
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_INODESTEAL, reap);
	else
		__count_vm_events(PGINODESTEAL, reap);
	spin_unlock(&inode_lru_lock);

	dispose_list(&freeable);
	up_read(&iprune_sem);
}
/*
 * shrink_icache_memory() will attempt to reclaim some unused inodes.  Here,
 * "unused" means that no dentries are referring to the inodes: the files are
 * not open and the dcache references to those inodes have already been
 * reclaimed.
 *
 * This function is passed the number of inodes to scan, and it returns the
 * total number of remaining possibly-reclaimable inodes.
 */
static int shrink_icache_memory(struct shrinker *shrink,
				struct shrink_control *sc)
{
	int nr = sc->nr_to_scan;
	gfp_t gfp_mask = sc->gfp_mask;

	if (nr) {
		/*
		 * Nasty deadlock avoidance.  We may hold various FS locks,
		 * and we don't want to recurse into the FS that called us
		 * in clear_inode() and friends..
		 */
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_icache(nr);
	}
	return (get_nr_inodes_unused() / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker icache_shrinker = {
	.shrink = shrink_icache_memory,
	.seeks = DEFAULT_SEEKS,
};
static void __wait_on_freeing_inode(struct inode *inode);

/*
 * Called with the inode lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		spin_lock(&inode->i_lock);
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (!test(inode, data)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}
/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		spin_lock(&inode->i_lock);
		if (inode->i_ino != ino) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}
/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
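/*
 * Illustrative sketch (not part of the original file): pseudo
 * filesystems without stable on-disk inode numbers typically pair
 * this with new_inode() when minting an inode:
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = get_next_ino();
 */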
/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
	struct inode *inode;

	spin_lock_prefetch(&inode_sb_list_lock);

	inode = alloc_inode(sb);
	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		inode_sb_list_add(inode);
	}
	return inode;
}
EXPORT_SYMBOL(new_inode);
/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (!lockdep_match_class(&inode->i_mutex,
		    &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
#endif
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);
/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @hashval:	hash value (usually inode number) to get
 * @test:	callback used for comparisons between inodes
 * @set:	callback used to initialize a new struct inode
 * @data:	opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
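/*
 * Illustrative sketch (not part of the original file): a lookup keyed
 * by more than the inode number. myfs_test(), myfs_set() and the key
 * structure are hypothetical:
 *
 *	inode = iget5_locked(sb, hashval, myfs_test, myfs_set, &key);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (inode->i_state & I_NEW) {
 *		... fill in the inode from the key ...
 *		unlock_new_inode(inode);
 *	}
 *	return inode;
 */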
/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb:		super block of file system
 * @ino:	inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set.  The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
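/*
 * Illustrative sketch (not part of the original file): the usual
 * lookup pattern, with myfs_read_inode() a hypothetical helper that
 * fills the inode from disk:
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (inode->i_state & I_NEW) {
 *		myfs_read_inode(inode);
 *		unlock_new_inode(inode);
 *	}
 *	return inode;
 */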
/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct hlist_node *node;
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, node, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);
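/*
 * Illustrative sketch (not part of the original file): a filesystem
 * fabricating inode numbers above a reserved range; MYFS_MAX_RESERVED
 * is a hypothetical constant:
 *
 *	inode->i_ino = iunique(sb, MYFS_MAX_RESERVED);
 */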
struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);
/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode.  You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);
/**
 * ilookup5 - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @hashval:	hash value (usually inode number) to search for
 * @test:	callback used for comparisons between inodes
 * @data:	opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count.  Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);
/**
 * ilookup - search for an inode in the inode cache
 * @sb:		super block of file system to search
 * @ino:	inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);
int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct hlist_node *node;
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, node, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!node)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);
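/*
 * Illustrative sketch (not part of the original file): a create path
 * that picks an inode number itself and then hashes the new inode;
 * a non-zero return means a live inode with this number already
 * exists:
 *
 *	inode->i_ino = ino;
 *	if (insert_inode_locked(inode) < 0) {
 *		iput(inode);
 *		return ERR_PTR(-EBUSY);
 *	}
 *	... initialise the inode, then unlock_new_inode(inode) ...
 */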
int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct hlist_node *node;
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, node, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!node)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);
int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Normal UNIX filesystem behaviour: delete the
 * inode when the usage count drops to zero, and
 * i_nlink is zero.
 */
int generic_drop_inode(struct inode *inode)
{
	return !inode->i_nlink || inode_unhashed(inode);
}
EXPORT_SYMBOL_GPL(generic_drop_inode);
/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour.  If it tells
 * us to evict inode, do so.  Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op && op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		if (!(inode->i_state & (I_DIRTY|I_SYNC)))
			inode_lru_list_add(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}
/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (inode) {
		BUG_ON(inode->i_state & I_CLEAR);

		if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
			iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);
/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);
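/*
 * Illustrative sketch (not part of the original file): this is
 * roughly what the FIBMAP ioctl boils down to; a result of 0 means
 * "no mapping" (a hole, or no ->bmap method):
 *
 *	sector_t phys = bmap(inode, 4);
 */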
/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
			     struct timespec now)
{
	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}
/**
 * touch_atime - update the access time
 * @mnt: mount the inode is accessed on
 * @dentry: dentry accessed
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return;
	if (IS_NOATIME(inode))
		return;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	if (mnt->mnt_flags & MNT_NOATIME)
		return;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return;

	if (timespec_equal(&inode->i_atime, &now))
		return;

	if (mnt_want_write(mnt))
		return;

	inode->i_atime = now;
	mark_inode_dirty_sync(inode);
	mnt_drop_write(mnt);
}
EXPORT_SYMBOL(touch_atime);
/**
 * file_update_time - update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode
 * for writeback.  Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore updates via this function with the
 * S_NOCMTIME inode flag, e.g. for network filesystems where these
 * timestamps are handled by the server.
 */
void file_update_time(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct timespec now;
	enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return;

	/* Finally allowed to write? Takes lock. */
	if (mnt_want_write_file(file))
		return;

	/* Only change inode inside the lock region */
	if (sync_it & S_VERSION)
		inode_inc_iversion(inode);
	if (sync_it & S_CTIME)
		inode->i_ctime = now;
	if (sync_it & S_MTIME)
		inode->i_mtime = now;
	mark_inode_dirty_sync(inode);
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(file_update_time);
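/*
 * Illustrative sketch (not part of the original file): a write path
 * calls this once it is committed to modifying the file, before
 * copying in the new data:
 *
 *	file_update_time(file);
 *	... copy data and dirty the pages ...
 */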
int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

int inode_wait(void *word)
{
	schedule();
	return 0;
}
EXPORT_SYMBOL(inode_wait);
/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found.  This function waits
 * until the deletion _might_ have completed.  Callers are responsible
 * to recheck inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}
static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);
/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}
void __init inode_init(void)
{
	int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);
	register_shrinker(&icache_shrinker);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}
void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &def_fifo_fops;
	else if (S_ISSOCK(mode))
		inode->i_fop = &bad_sock_fops;
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);
/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
			mode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
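/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * create or mkdir method would typically do
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		inode_init_owner(inode, dir, mode);
 *
 * so the setgid-directory inheritance above is applied consistently.
 */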
/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER to the inode, or
 * owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns = inode_userns(inode);

	if (current_user_ns() == ns && current_fsuid() == inode->i_uid)
		return true;
	if (ns_capable(ns, CAP_FOWNER))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);