/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_acl.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_trace.h"
/*
 * Define xfs inode iolock lockdep classes. We need to ensure that all active
 * inodes are considered the same for lockdep purposes, including inodes that
 * are recycled through the XFS_IRECLAIMABLE state. This is the only way to
 * guarantee the locks are considered the same when there are multiple lock
 * initialisation sites. Also, define a reclaimable inode class so it is
 * obvious in lockdep reports which class the report is against.
 */
static struct lock_class_key xfs_iolock_active;
struct lock_class_key xfs_iolock_reclaimable;
/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_active, "xfs_iolock_active");
	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	return ip;
}
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	kmem_zone_free(xfs_inode_zone, ip);
}
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
		       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
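
/*
 * A minimal sketch of the lookup side that the i_ino zeroing above pairs
 * with (the real check lives in xfs_iget_cache_hit() below); abbreviated
 * here for illustration only:
 *
 *	rcu_read_lock();
 *	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 *	if (ip) {
 *		spin_lock(&ip->i_flags_lock);
 *		if (ip->i_ino != ino) {
 *			// stale RCU reference to a freed inode; retry
 *		}
 *	}
 */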
/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;
	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}
	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}
	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}
	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode. We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}
		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
		lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
				&xfs_iolock_active, "xfs_iolock_active");

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}
	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(xs_ig_found);

	return 0;
out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;
	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;
	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}
	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}
	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}
	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, iflags);
	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;
out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;
	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;
	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;
	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;
out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
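
/*
 * Illustrative usage sketch (not a caller that exists in this file; the
 * surrounding names are assumed context and error handling is abbreviated):
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	... ip is returned referenced, with the ILOCK held shared ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	IRELE(ip);
 */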
/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}
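
/*
 * Typical pairing (sketch only): the caller saves the returned mode and
 * hands it back to xfs_iunlock_map_shared(), since the lock may have been
 * taken in either mode:
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	... read the extent list ...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */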
/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
}
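
/*
 * Example (sketch): take both locks in the required order, IO lock first,
 * and drop them with the same flags:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	... modify the inode ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */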
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *		 locked.  See the comment for xfs_ilock() for a list
 *		 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
	return 1;
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
out:
	return 0;
}
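
/*
 * Example (sketch): a caller that must not block tries the lock and backs
 * off on failure; the EAGAIN policy here is hypothetical, not part of this
 * file:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED))
 *		return EAGAIN;
 *	... read inode state ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */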
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *		 unlocked.  See the comment for xfs_ilock() for a list
 *		 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
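
/*
 * Example (sketch): downgrade from exclusive to shared without ever
 * dropping the lock, then unlock with the demoted mode:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... make modifications ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... readers may now take the lock shared ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */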
#ifdef DEBUG
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

	ASSERT(0);
	return 0;
}
#endif
void
__xfs_iflock(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

	do {
		prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (xfs_isiflocked(ip))
			io_schedule();
	} while (!xfs_iflock_nowait(ip));

	finish_wait(wq, &wait.wait);
}
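
/*
 * The wake-up side this waits on (sketch; the real helper is xfs_ifunlock()
 * in xfs_inode.h, shown here from memory as an assumption): clearing the
 * flush lock flag must be followed by a wake_up_bit() on the same bit so
 * the waiter above observes the change:
 *
 *	xfs_iflags_clear(ip, XFS_IFLOCK);
 *	wake_up_bit(&ip->i_flags, __XFS_IFLOCK_BIT);
 */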