/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
20 #include "xfs_types.h"
24 #include "xfs_trans.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_alloc_btree.h"
32 #include "xfs_ialloc_btree.h"
33 #include "xfs_dir2_sf.h"
34 #include "xfs_attr_sf.h"
35 #include "xfs_dinode.h"
36 #include "xfs_inode.h"
37 #include "xfs_btree.h"
38 #include "xfs_ialloc.h"
39 #include "xfs_quota.h"
40 #include "xfs_utils.h"
41 #include "xfs_trans_priv.h"
42 #include "xfs_inode_item.h"
44 #include "xfs_btree_trace.h"
45 #include "xfs_dir2_trace.h"
/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * If this didn't occur in transactions, we could use KM_MAYFAIL
	 * and return NULL here on ENOMEM. Set the code up to do this
	 * anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_update_size = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
	ip->i_size = 0;
	ip->i_new_size = 0;
	/*
	 * Initialize inode's trace buffers.
	 */
#ifdef	XFS_INODE_TRACE
	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BTREE_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
#endif

	/* prevent anyone from using this yet */
	VFS_I(ip)->i_state = I_NEW|I_LOCK;

	return ip;
}
/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error = EAGAIN;

	/*
	 * If INEW is set, this inode is being set up; if IRECLAIM is set,
	 * it is being torn down. Either way, pause and try again.
	 */
	if (xfs_iflags_test(ip, (XFS_INEW|XFS_IRECLAIM))) {
		XFS_STATS_INC(xs_ig_frecycle);
		goto out_error;
	}
	/* If IRECLAIMABLE is set, we've torn down the VFS inode part */
	if (xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
		/*
		 * If lookup is racing with unlink, then we should return an
		 * error immediately so we don't remove it from the reclaim
		 * list and potentially leak the inode.
		 */
		if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
			error = ENOENT;
			goto out_error;
		}

		xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

		/*
		 * We need to re-initialise the VFS inode as it has been
		 * 'freed' by the VFS. Do this here so we can deal with
		 * errors cleanly, then tag it so it can be set up correctly
		 * later.
		 */
		if (inode_init_always(mp->m_super, VFS_I(ip))) {
			error = ENOMEM;
			goto out_error;
		}

		/*
		 * We must set the XFS_INEW flag before clearing the
		 * XFS_IRECLAIMABLE flag so that if a racing lookup does
		 * not find the XFS_IRECLAIMABLE above but has the igrab()
		 * below succeed we can safely check XFS_INEW to detect
		 * that this inode is still being initialised.
		 */
		xfs_iflags_set(ip, XFS_INEW);
		xfs_iflags_clear(ip, XFS_IRECLAIMABLE);

		/* clear the radix tree reclaim flag as well. */
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
	} else if (!igrab(VFS_I(ip))) {
		/* If the VFS inode is being torn down, pause and try again. */
		XFS_STATS_INC(xs_ig_frecycle);
		goto out_error;
	} else if (xfs_iflags_test(ip, XFS_INEW)) {
		/*
		 * We are racing with another cache hit that is
		 * currently recycling this inode out of the XFS_IRECLAIMABLE
		 * state. Wait for the initialisation to complete before
		 * continuing.
		 */
		wait_on_inode(VFS_I(ip));
	}

	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		iput(VFS_I(ip));
		goto out_error;
	}

	/* We've got a live one. */
	read_unlock(&pag->pag_ici_lock);

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	xfs_itrace_exit_tag(ip, "xfs_iget.found");
	XFS_STATS_INC(xs_ig_found);
	return 0;

out_error:
	read_unlock(&pag->pag_ici_lock);
	return error;
}
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	xfs_daddr_t		bno,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_inode	*ip;
	int			error;
	unsigned long		first_index, mask;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, bno, flags);
	if (error)
		goto out_destroy;
	xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}
	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = agino & mask;
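	/*
	 * Worked example (illustrative numbers, not taken from this file):
	 * with 8192-byte inode clusters and 256-byte inodes (sb_inodelog
	 * == 8), there are 8192 >> 8 == 32 inodes per cluster, so mask ==
	 * ~(32 - 1) == ~0x1f and first_index rounds agino down to the
	 * first inode of its cluster.
	 */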
	write_lock(&pag->pag_ici_lock);
	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}
	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	*ipp = ip;
	return 0;

out_preload_end:
	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	xfs_destroy_inode(ip);
	return error;
}
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *               for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *        if known (as by bulkstat), else 0.
 */
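/*
 * Illustrative only (a sketch, not a call site in this file): a
 * transactionless lookup that takes the inode lock shared, assuming a
 * valid mp and ino, might look like:
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
 *	if (!error) {
 *		...use ip...
 *		xfs_iput(ip, XFS_ILOCK_SHARED);
 *	}
 */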
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* the radix tree exists only in inode capable AGs */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_get_perag(mp, ino);
	if (!pag->pagi_inodeok)
		return EINVAL;
	ASSERT(pag->pag_ici_init);
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_put_perag(mp, pag);
	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_put_perag(mp, pag);
	return error;
}
/*
 * Look for the inode corresponding to the given ino in the hash table.
 * If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_t *
xfs_inode_incore(xfs_mount_t	*mp,
		 xfs_ino_t	ino,
		 xfs_trans_t	*tp)
{
	xfs_inode_t	*ip;
	xfs_perag_t	*pag;

	pag = xfs_get_perag(mp, ino);
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ino));
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/* the returned inode must match the transaction */
	if (ip && (ip->i_transp != tp))
		return NULL;
	return ip;
}
/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 released.  See the comment on xfs_iunlock() for a list
 *	 of valid values.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	xfs_itrace_entry(ip);
	xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}
/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	struct inode	*inode = VFS_I(ip);

	xfs_itrace_entry(ip);

	if (ip->i_d.di_mode == 0) {
		ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
		make_bad_inode(inode);
	}
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}
/*
 * This is called to free all the memory associated with an inode.
 * It must free the inode itself and any buffers allocated for
 * if_extents/if_data and if_broot.  It must also free the lock
 * associated with the inode.
 *
 * Note: because we don't initialise everything on reallocation out
 * of the zone, we must ensure we nullify everything correctly before
 * freeing the structure.
 */
void
xfs_ireclaim(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.  It doesn't matter
	 * if it was never added to it because radix_tree_delete can deal
	 * with that case just fine.
	 */
	pag = xfs_get_perag(mp, ip->i_ino);
	write_lock(&pag->pag_ici_lock);
	radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);
	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock one but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Release dquots (and their references) if any.
	 */
	XFS_QM_DQDETACH(ip->i_mount, ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
#ifdef XFS_INODE_TRACE
	ktrace_free(ip->i_trace);
#endif
#ifdef XFS_BMAP_TRACE
	ktrace_free(ip->i_xtrace);
#endif
#ifdef XFS_BTREE_TRACE
	ktrace_free(ip->i_btrace);
#endif
#ifdef XFS_RW_TRACE
	ktrace_free(ip->i_rwtrace);
#endif
#ifdef XFS_ILOCK_TRACE
	ktrace_free(ip->i_lock_trace);
#endif
#ifdef XFS_DIR2_TRACE
	ktrace_free(ip->i_dir_trace);
#endif
	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
		       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	kmem_zone_free(xfs_inode_zone, ip);
}
/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}
/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
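/*
 * Typical pairing (sketch): the returned mode must be handed back to the
 * unlock side so it matches however the lock was actually taken:
 *
 *	uint lock_mode = xfs_ilock_map_shared(ip);
 *	...read the extent list...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */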
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
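/*
 * Example (sketch): taking and dropping both locks exclusively around an
 * update, in the required IO-lock-first order:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	...modify the inode...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */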
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;

out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
out:
	return 0;
}
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t *)(ip->i_itemp));
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}
/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);
}
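/*
 * Example (sketch): convert an exclusive hold to shared without ever
 * dropping the lock:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	...publish state that readers may now see...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	...we now hold XFS_ILOCK_SHARED...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */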
#ifdef DEBUG
/*
 * Debug-only routine: without additional rw_semaphore APIs, we can
 * now only answer requests regarding whether we hold the lock for write
 * (reader state is outside our visibility, we only track writer state).
 *
 * Note: this means !xfs_isilocked would give false positives, so don't do that.
 */
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
			XFS_ILOCK_EXCL) {
		if (!ip->i_lock.mr_writer)
			return 0;
	}

	if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
			XFS_IOLOCK_EXCL) {
		if (!ip->i_iolock.mr_writer)
			return 0;
	}

	return 1;
}
#endif	/* DEBUG */
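/*
 * Given the writer-only visibility noted above, the intended use is to
 * assert that a lock is held, e.g.:
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 *
 * Asserting the negation is unreliable because shared holders are not
 * visible here.
 */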
#ifdef	XFS_INODE_TRACE

#define KTRACE_ENTER(ip, vk, s, line, ra)			\
	ktrace_enter((ip)->i_trace,				\
/* 0 */	(void *)(__psint_t)(vk),				\
/* 1 */	(void *)(s),						\
/* 2 */	(void *)(__psint_t) line,				\
/* 3 */	(void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count),	\
/* 4 */	(void *)(ra),						\
/* 5 */	NULL,							\
/* 6 */	(void *)(__psint_t)current_cpu(),			\
/* 7 */	(void *)(__psint_t)current_pid(),			\
/* 8 */	(void *)__return_address,				\
/* 9 */	NULL, NULL, NULL, NULL, NULL, NULL, NULL)
/*
 * Vnode tracing code.
 */
void
_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
}

void
_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
}

void
xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
}

void
_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
}

void
xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
}
#endif	/* XFS_INODE_TRACE */