/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_btree_trace.h"
#include "xfs_dir2_trace.h"

/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
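
	/*
	 * The zone allocator can hand back a recycled inode, and we do not
	 * reinitialise everything on reallocation out of the zone (see the
	 * note above xfs_ireclaim() below), so verify that the previous
	 * holder left the object in the expected quiesced state.
	 */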
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	/*
	 * initialise the VFS inode here to get failures
	 * out of the way early.
	 */
	if (!inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_update_size = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	/*
	 * Initialize inode's trace buffers.
	 */
#ifdef	XFS_INODE_TRACE
	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BTREE_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
#endif

	return ip;
}

/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error = EAGAIN;

	/*
	 * If INEW is set this inode is being set up.
	 * If IRECLAIM is set this inode is being torn down.
	 * Pause and try again.
	 */
	if (xfs_iflags_test(ip, (XFS_INEW|XFS_IRECLAIM))) {
		XFS_STATS_INC(xs_ig_frecycle);
		goto out_error;
	}

	/* If IRECLAIMABLE is set, we've torn down the VFS inode part */
	if (xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {

		/*
		 * If lookup is racing with unlink, then we should return an
		 * error immediately so we don't remove it from the reclaim
		 * list and potentially leak the inode.
		 */
		if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
			error = ENOENT;
			goto out_error;
		}

		xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

		/*
		 * We need to re-initialise the VFS inode as it has been
		 * 'freed' by the VFS. Do this here so we can deal with
		 * errors cleanly, then tag it so it can be set up correctly
		 * later.
		 */
		if (!inode_init_always(mp->m_super, VFS_I(ip))) {
			error = ENOMEM;
			goto out_error;
		}

		/*
		 * We must set the XFS_INEW flag before clearing the
		 * XFS_IRECLAIMABLE flag so that if a racing lookup does
		 * not find the XFS_IRECLAIMABLE above but has the igrab()
		 * below succeed we can safely check XFS_INEW to detect
		 * that this inode is still being initialised.
		 */
		xfs_iflags_set(ip, XFS_INEW);
		xfs_iflags_clear(ip, XFS_IRECLAIMABLE);

		/* clear the radix tree reclaim flag as well. */
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
	} else if (!igrab(VFS_I(ip))) {
		/* If the VFS inode is being torn down, pause and try again. */
		XFS_STATS_INC(xs_ig_frecycle);
		goto out_error;
	} else if (xfs_iflags_test(ip, XFS_INEW)) {
		/*
		 * We are racing with another cache hit that is
		 * currently recycling this inode out of the XFS_IRECLAIMABLE
		 * state. Wait for the initialisation to complete before
		 * continuing.
		 */
		wait_on_inode(VFS_I(ip));
	}

	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		iput(VFS_I(ip));
		goto out_error;
	}

	/* We've got a live one. */
	read_unlock(&pag->pag_ici_lock);

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	xfs_itrace_exit_tag(ip, "xfs_iget.found");
	XFS_STATS_INC(xs_ig_found);
	return 0;

out_error:
	read_unlock(&pag->pag_ici_lock);
	return error;
}
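
/*
 * Note on xfs_iget_cache_hit() above: every exit path drops
 * pag->pag_ici_lock, which is exactly what the __releases() annotation
 * on the prototype promises to the sparse checker.
 */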

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	xfs_daddr_t		bno,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_inode	*ip;
	int			error;
	unsigned long		first_index, mask;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, bno, flags);
	if (error)
		goto out_destroy;

	xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}
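
	/*
	 * XFS_INODE_CLUSTER_SIZE() is the inode cluster size in bytes and
	 * sb_inodelog is log2 of the on-disk inode size, so the shift
	 * yields inodes per cluster; the mask rounds agino down to the
	 * first inode of its cluster.
	 */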
	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = agino & mask;
	write_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	*ipp = ip;
	return 0;

out_preload_end:
	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	xfs_destroy_inode(ip);
	return error;
}
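
/*
 * Note on xfs_iget_cache_miss() above: an EAGAIN return (preload failure
 * or a racing insert of the same inode) is not fatal; the caller,
 * xfs_iget(), backs off for a tick and retries the whole lookup.
 */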

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the per-AG structures holding the inode cache.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *	  if known (as by bulkstat), else 0.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* the radix tree exists only in inode capable AGs */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_get_perag(mp, ino);
	if (!pag->pagi_inodeok)
		return EINVAL;
	ASSERT(pag->pag_ici_init);
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_put_perag(mp, pag);

	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_put_perag(mp, pag);
	return error;
}
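
/*
 * Illustrative call pattern for a hypothetical caller (not taken from
 * this file): look up an inode with the inode lock held shared, use it,
 * then drop the reference and the lock together:
 *
 *	xfs_inode_t	*ip;
 *	int		error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
 *	if (error)
 *		return error;
 *	(use ip)
 *	xfs_iput(ip, XFS_ILOCK_SHARED);
 */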

/*
 * Look for the inode corresponding to the given ino in the per-AG inode
 * cache.  If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_t *
xfs_inode_incore(xfs_mount_t	*mp,
		 xfs_ino_t	ino,
		 xfs_trans_t	*tp)
{
	xfs_inode_t	*ip;
	xfs_perag_t	*pag;

	pag = xfs_get_perag(mp, ino);
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ino));
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/* the returned inode must match the transaction */
	if (ip && (ip->i_transp != tp))
		return NULL;
	return ip;
}

/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 released.  See the comment on xfs_iunlock() for a list
 *	 of valid values.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	xfs_itrace_entry(ip);
	xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}

/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	struct inode	*inode = VFS_I(ip);

	xfs_itrace_entry(ip);

	if (ip->i_d.di_mode == 0) {
		ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
		make_bad_inode(inode);
	}
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}

/*
 * This is called to free all the memory associated with an inode.
 * It must free the inode itself and any buffers allocated for
 * if_extents/if_data and if_broot.  It must also free the lock
 * associated with the inode.
 *
 * Note: because we don't initialise everything on reallocation out
 * of the zone, we must ensure we nullify everything correctly before
 * freeing the structure.
 */
void
xfs_ireclaim(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	XFS_STATS_INC(xs_ig_reclaims);

	/*
	 * Remove the inode from the per-AG radix tree.  It doesn't matter
	 * if it was never added to it because radix_tree_delete can deal
	 * with that case just fine.
	 */
	pag = xfs_get_perag(mp, ip->i_ino);
	write_lock(&pag->pag_ici_lock);
	radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock one but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Release dquots (and their references) if any.
	 */
	XFS_QM_DQDETACH(ip->i_mount, ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

#ifdef XFS_INODE_TRACE
	ktrace_free(ip->i_trace);
#endif
#ifdef XFS_BMAP_TRACE
	ktrace_free(ip->i_xtrace);
#endif
#ifdef XFS_BTREE_TRACE
	ktrace_free(ip->i_btrace);
#endif
#ifdef XFS_RW_TRACE
	ktrace_free(ip->i_rwtrace);
#endif
#ifdef XFS_ILOCK_TRACE
	ktrace_free(ip->i_lock_trace);
#endif
#ifdef XFS_DIR2_TRACE
	ktrace_free(ip->i_dir_trace);
#endif
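
	/*
	 * The XFS_LI_IN_AIL test below is deliberately repeated under
	 * ailp->xa_lock: the unlocked check is a cheap fast path, and the
	 * locked re-check makes the removal safe against a concurrent AIL
	 * push.  The asymmetric else-unlock reflects that
	 * xfs_trans_ail_delete() drops the lock itself on the delete path.
	 */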
	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
		       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	kmem_zone_free(xfs_inode_zone, ip);
}

/*
 * This is a wrapper routine around the xfs_ilock() routine used to
 * centralize some grungy code.  It is used in places that wish to lock
 * the inode solely for reading the extents.  The reason these places
 * can't just call xfs_ilock(SHARED) is that the inode lock also guards
 * the bringing in of the extents from disk for a file in b-tree format.
 * If the inode is in b-tree format, then we need to lock the inode
 * exclusively until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.  What we
 * do instead is check to see if the extents have been read in yet, and
 * only lock the inode exclusively if they have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is the mode in
 * which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
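
/*
 * Illustrative pairing (hypothetical caller): the returned mode must be
 * handed back to the unlock routine, since the lock may have been taken
 * either shared or exclusive:
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	(walk the extent list)
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */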

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}
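
/*
 * Illustrative usage (hypothetical caller): both locks taken in the
 * required IO-lock-first order and released with the same flags:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	(modify the inode)
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */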

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;

out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
out:
	return 0;
}
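
/*
 * Illustrative usage (hypothetical caller): attempt both locks without
 * blocking and back off on contention:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL))
 *		return EAGAIN;
 */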

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t *)(ip->i_itemp));
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);
}

#ifdef DEBUG
/*
 * Debug-only routine: without additional rw_semaphore APIs we can only
 * answer requests regarding whether we hold the lock for write (reader
 * state is outside our visibility, we only track writer state).
 * Note: this means !xfs_isilocked would give false positives, so don't do that.
 */
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
			XFS_ILOCK_EXCL) {
		if (!ip->i_lock.mr_writer)
			return 0;
	}

	if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
			XFS_IOLOCK_EXCL) {
		if (!ip->i_iolock.mr_writer)
			return 0;
	}

	return 1;
}
#endif	/* DEBUG */
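
/*
 * Illustrative usage (hypothetical caller): the intended consumer is an
 * assertion that an exclusive lock is held, never its negation:
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 */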

#ifdef	XFS_INODE_TRACE

#define KTRACE_ENTER(ip, vk, s, line, ra)			\
	ktrace_enter((ip)->i_trace,				\
/* 0 */		(void *)(__psint_t)(vk),			\
/* 1 */		(void *)(s),					\
/* 2 */		(void *)(__psint_t) line,			\
/* 3 */		(void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \
/* 4 */		(void *)(ra),					\
/* 5 */		NULL,						\
/* 6 */		(void *)(__psint_t)current_cpu(),		\
/* 7 */		(void *)(__psint_t)current_pid(),		\
/* 8 */		(void *)__return_address,			\
/* 9 */		NULL, NULL, NULL, NULL, NULL, NULL, NULL)

/*
 * Vnode tracing code.
 */
void
_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
}

void
_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
}

void
xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
}

void
_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
}

void
xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
}

#endif	/* XFS_INODE_TRACE */