/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_btree_trace.h"
#include "xfs_dir2_trace.h"
/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_update_core = 0;
	ip->i_update_size = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	/*
	 * Initialize inode's trace buffers.
	 */
#ifdef	XFS_INODE_TRACE
	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BTREE_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
#endif

	/*
	 * Now initialise the VFS inode. We do this after the xfs_inode
	 * initialisation as internal failures will result in ->destroy_inode
	 * being called and that will pass down through the reclaim path and
	 * free the XFS inode. This path requires the XFS inode to already be
	 * initialised. Hence if this call fails, the xfs_inode has already
	 * been freed and we should not reference it at all in the error
	 * handling.
	 */
	if (!inode_init_always(mp->m_super, VFS_I(ip)))
		return NULL;

	/* prevent anyone from using this yet */
	VFS_I(ip)->i_state = I_NEW|I_LOCK;

	return ip;
}

/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error = EAGAIN;

	/*
	 * If INEW is set this inode is being set up
	 * If IRECLAIM is set this inode is being torn down
	 * Pause and try again.
	 */
	if (xfs_iflags_test(ip, (XFS_INEW|XFS_IRECLAIM))) {
		XFS_STATS_INC(xs_ig_frecycle);
		goto out_error;
	}

	/* If IRECLAIMABLE is set, we've torn down the vfs inode part */
	if (xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
		/*
		 * If lookup is racing with unlink, then we should return an
		 * error immediately so we don't remove it from the reclaim
		 * list and potentially leak the inode.
		 */
		if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
			error = ENOENT;
			goto out_error;
		}

		xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

		/*
		 * We need to re-initialise the VFS inode as it has been
		 * 'freed' by the VFS. Do this here so we can deal with
		 * errors cleanly, then tag it so it can be set up correctly
		 * later.
		 */
		if (!inode_init_always(mp->m_super, VFS_I(ip))) {
			error = ENOMEM;
			goto out_error;
		}

		/*
		 * We must set the XFS_INEW flag before clearing the
		 * XFS_IRECLAIMABLE flag so that if a racing lookup does
		 * not find the XFS_IRECLAIMABLE above but has the igrab()
		 * below succeed we can safely check XFS_INEW to detect
		 * that this inode is still being initialised.
		 */
		xfs_iflags_set(ip, XFS_INEW);
		xfs_iflags_clear(ip, XFS_IRECLAIMABLE);

		/* clear the radix tree reclaim flag as well. */
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
	} else if (!igrab(VFS_I(ip))) {
		/* If the VFS inode is being torn down, pause and try again. */
		XFS_STATS_INC(xs_ig_frecycle);
		goto out_error;
	} else if (xfs_iflags_test(ip, XFS_INEW)) {
		/*
		 * We are racing with another cache hit that is
		 * currently recycling this inode out of the XFS_IRECLAIMABLE
		 * state. Wait for the initialisation to complete before
		 * continuing.
		 */
		wait_on_inode(VFS_I(ip));
	}

	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		iput(VFS_I(ip));
		goto out_error;
	}

	/* We've got a live one. */
	read_unlock(&pag->pag_ici_lock);

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	xfs_itrace_exit_tag(ip, "xfs_iget.found");
	XFS_STATS_INC(xs_ig_found);
	return 0;

out_error:
	read_unlock(&pag->pag_ici_lock);
	return error;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	xfs_daddr_t		bno,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_inode	*ip;
	int			error;
	unsigned long		first_index, mask;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, bno, flags);
	if (error)
		goto out_destroy;

	xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = agino & mask;
	write_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	*ipp = ip;
	return 0;

out_preload_end:
	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	xfs_destroy_inode(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *	  if known (as by bulkstat), else 0.
 */
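/*
 * A minimal usage sketch (illustrative only, not part of the original
 * file): look an inode up with a shared ilock and release it when done.
 * Further error handling is elided.
 *
 *	xfs_inode_t	*ip;
 *	int		error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
 *	if (error)
 *		return error;
 *	...use the inode with the ilock held shared...
 *	xfs_iput(ip, XFS_ILOCK_SHARED);
 */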
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* the radix tree exists only in inode capable AGs */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_get_perag(mp, ino);
	if (!pag->pagi_inodeok)
		return EINVAL;
	ASSERT(pag->pag_ici_init);
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_put_perag(mp, pag);

	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_put_perag(mp, pag);
	return error;
}

/*
 * Look for the inode corresponding to the given ino in the hash table.
 * If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_t *
xfs_inode_incore(xfs_mount_t	*mp,
		 xfs_ino_t	ino,
		 xfs_trans_t	*tp)
{
	xfs_inode_t	*ip;
	xfs_perag_t	*pag;

	pag = xfs_get_perag(mp, ino);
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ino));
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/* the returned inode must match the transaction */
	if (ip && (ip->i_transp != tp))
		return NULL;
	return ip;
}

/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 released.  See the comment on xfs_iunlock() for a list
 *	 of valid values.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	xfs_itrace_entry(ip);
	xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}

/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	struct inode	*inode = VFS_I(ip);

	xfs_itrace_entry(ip);

	if (ip->i_d.di_mode == 0) {
		ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
		make_bad_inode(inode);
	}
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}

/*
 * This is called to free all the memory associated with an inode.
 * It must free the inode itself and any buffers allocated for
 * if_extents/if_data and if_broot.  It must also free the lock
 * associated with the inode.
 *
 * Note: because we don't initialise everything on reallocation out
 * of the zone, we must ensure we nullify everything correctly before
 * freeing the structure.
 */
void
xfs_ireclaim(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	XFS_STATS_INC(xs_ig_reclaims);

	/*
	 * Remove the inode from the per-AG radix tree.  It doesn't matter
	 * if it was never added to it because radix_tree_delete can deal
	 * with that case just fine.
	 */
	pag = xfs_get_perag(mp, ip->i_ino);
	write_lock(&pag->pag_ici_lock);
	radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock one but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

#ifdef XFS_INODE_TRACE
	ktrace_free(ip->i_trace);
#endif
#ifdef XFS_BMAP_TRACE
	ktrace_free(ip->i_xtrace);
#endif
#ifdef XFS_BTREE_TRACE
	ktrace_free(ip->i_btrace);
#endif
#ifdef XFS_RW_TRACE
	ktrace_free(ip->i_rwtrace);
#endif
#ifdef XFS_ILOCK_TRACE
	ktrace_free(ip->i_lock_trace);
#endif
#ifdef XFS_DIR2_TRACE
	ktrace_free(ip->i_dir_trace);
#endif

	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
				       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	kmem_zone_free(xfs_inode_zone, ip);
}

/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
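
/*
 * A sketch of the intended pairing (illustrative only, not part of the
 * original file): callers save the returned mode and hand it back on
 * unlock, since either XFS_ILOCK_SHARED or XFS_ILOCK_EXCL may have been
 * taken.
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	...read the extent list...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */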

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
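
/*
 * For example (an illustrative sketch, not part of the original file),
 * taking and dropping both locks exclusively around an update, with the
 * IO lock ordered first as required:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	...modify the inode...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */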
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;

 out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 out:
	return 0;
}

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t *)(ip->i_itemp));
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}

/*
 * give up write locks.  the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);
}

#ifdef DEBUG
/*
 * Debug-only routine, without additional rw_semaphore APIs, we can
 * now only answer requests regarding whether we hold the lock for write
 * (reader state is outside our visibility, we only track writer state).
 *
 * Note: this means !xfs_isilocked would give false positives, so don't do that.
 */
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
			XFS_ILOCK_EXCL) {
		if (!ip->i_lock.mr_writer)
			return 0;
	}

	if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
			XFS_IOLOCK_EXCL) {
		if (!ip->i_iolock.mr_writer)
			return 0;
	}

	return 1;
}
#endif
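
/*
 * A typical (illustrative) use of xfs_isilocked() is in assertions that
 * document a function's locking requirements, e.g.:
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 */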

#ifdef	XFS_INODE_TRACE

#define KTRACE_ENTER(ip, vk, s, line, ra)			\
	ktrace_enter((ip)->i_trace,				\
/* 0 */		(void *)(__psint_t)(vk),			\
/* 1 */		(void *)(s),					\
/* 2 */		(void *)(__psint_t) line,			\
/* 3 */		(void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \
/* 4 */		(void *)(ra),					\
/* 5 */		NULL,						\
/* 6 */		(void *)(__psint_t)current_cpu(),		\
/* 7 */		(void *)(__psint_t)current_pid(),		\
/* 8 */		(void *)__return_address,			\
/* 9 */		NULL, NULL, NULL, NULL, NULL, NULL, NULL)

/*
 * Vnode tracing code.
 */
void
_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
}

void
_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
}

void
xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
}

void
_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
}

void
xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
}
#endif	/* XFS_INODE_TRACE */