fs/xfs/xfs_iget.c
1 /*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_acl.h"
22 #include "xfs_bit.h"
23 #include "xfs_log.h"
24 #include "xfs_inum.h"
25 #include "xfs_trans.h"
26 #include "xfs_sb.h"
27 #include "xfs_ag.h"
28 #include "xfs_mount.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_alloc_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_dinode.h"
33 #include "xfs_inode.h"
34 #include "xfs_btree.h"
35 #include "xfs_ialloc.h"
36 #include "xfs_quota.h"
37 #include "xfs_utils.h"
38 #include "xfs_trans_priv.h"
39 #include "xfs_inode_item.h"
40 #include "xfs_bmap.h"
41 #include "xfs_trace.h"
44 /*
45 * Define xfs inode iolock lockdep classes. We need to ensure that all active
46 * inodes are considered the same for lockdep purposes, including inodes that
47 * are recycled through the XFS_IRECLAIMABLE state. This is the only way to
48 * guarantee the locks are considered the same when there are multiple lock
49 * initialisation sites. Also, define a reclaimable inode class so it is
50 * obvious in lockdep reports which class the report is against.
51 */
52 static struct lock_class_key xfs_iolock_active;
53 struct lock_class_key xfs_iolock_reclaimable;
56 * Allocate and initialise an xfs_inode.
58 STATIC struct xfs_inode *
59 xfs_inode_alloc(
60 struct xfs_mount *mp,
61 xfs_ino_t ino)
63 struct xfs_inode *ip;
66 * if this didn't occur in transactions, we could use
67 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
68 * code up to do this anyway.
70 ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
71 if (!ip)
72 return NULL;
73 if (inode_init_always(mp->m_super, VFS_I(ip))) {
74 kmem_zone_free(xfs_inode_zone, ip);
75 return NULL;
78 ASSERT(atomic_read(&ip->i_pincount) == 0);
79 ASSERT(!spin_is_locked(&ip->i_flags_lock));
80 ASSERT(!xfs_isiflocked(ip));
81 ASSERT(ip->i_ino == 0);
83 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
84 lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
85 &xfs_iolock_active, "xfs_iolock_active");
87 /* initialise the xfs inode */
88 ip->i_ino = ino;
89 ip->i_mount = mp;
90 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
91 ip->i_afp = NULL;
92 memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
93 ip->i_flags = 0;
94 ip->i_delayed_blks = 0;
95 memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
97 return ip;
100 STATIC void
101 xfs_inode_free_callback(
102 struct rcu_head *head)
104 struct inode *inode = container_of(head, struct inode, i_rcu);
105 struct xfs_inode *ip = XFS_I(inode);
107 kmem_zone_free(xfs_inode_zone, ip);
110 void
111 xfs_inode_free(
112 struct xfs_inode *ip)
114 switch (ip->i_d.di_mode & S_IFMT) {
115 case S_IFREG:
116 case S_IFDIR:
117 case S_IFLNK:
118 xfs_idestroy_fork(ip, XFS_DATA_FORK);
119 break;
122 if (ip->i_afp)
123 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
125 if (ip->i_itemp) {
127 * Only if we are shutting down the fs will we see an
128 * inode still in the AIL. If it is there, we should remove
129 * it to prevent a use-after-free from occurring.
131 xfs_log_item_t *lip = &ip->i_itemp->ili_item;
132 struct xfs_ail *ailp = lip->li_ailp;
134 ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
135 XFS_FORCED_SHUTDOWN(ip->i_mount));
136 if (lip->li_flags & XFS_LI_IN_AIL) {
137 spin_lock(&ailp->xa_lock);
138 if (lip->li_flags & XFS_LI_IN_AIL)
139 xfs_trans_ail_delete(ailp, lip);
140 else
141 spin_unlock(&ailp->xa_lock);
143 xfs_inode_item_destroy(ip);
144 ip->i_itemp = NULL;
147 /* asserts to verify all state is correct here */
148 ASSERT(atomic_read(&ip->i_pincount) == 0);
149 ASSERT(!spin_is_locked(&ip->i_flags_lock));
150 ASSERT(!xfs_isiflocked(ip));
153 * Because we use RCU freeing we need to ensure the inode always
154 * appears to be reclaimed with an invalid inode number when in the
155 * free state. The ip->i_flags_lock provides the barrier against lookup
156 * races.
158 spin_lock(&ip->i_flags_lock);
159 ip->i_flags = XFS_IRECLAIM;
160 ip->i_ino = 0;
161 spin_unlock(&ip->i_flags_lock);
163 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
166 /*
167 * Check the validity of the inode we just found in the cache
168 */
169 static int
170 xfs_iget_cache_hit(
171 struct xfs_perag *pag,
172 struct xfs_inode *ip,
173 xfs_ino_t ino,
174 int flags,
175 int lock_flags) __releases(RCU)
177 struct inode *inode = VFS_I(ip);
178 struct xfs_mount *mp = ip->i_mount;
179 int error;
182 * check for re-use of an inode within an RCU grace period due to the
183 * radix tree nodes not being updated yet. We monitor for this by
184 * setting the inode number to zero before freeing the inode structure.
185 * If the inode has been reallocated and set up, then the inode number
186 * will not match, so check for that, too.
188 spin_lock(&ip->i_flags_lock);
189 if (ip->i_ino != ino) {
190 trace_xfs_iget_skip(ip);
191 XFS_STATS_INC(xs_ig_frecycle);
192 error = EAGAIN;
193 goto out_error;
198 * If we are racing with another cache hit that is currently
199 * instantiating this inode or currently recycling it out of
200 * reclaimable state, wait for the initialisation to complete
201 * before continuing.
203 * XXX(hch): eventually we should do something equivalent to
204 * wait_on_inode to wait for these flags to be cleared
205 * instead of polling for it.
207 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
208 trace_xfs_iget_skip(ip);
209 XFS_STATS_INC(xs_ig_frecycle);
210 error = EAGAIN;
211 goto out_error;
215 * If lookup is racing with unlink return an error immediately.
217 if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
218 error = ENOENT;
219 goto out_error;
223 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
224 * Need to carefully get it back into usable state.
226 if (ip->i_flags & XFS_IRECLAIMABLE) {
227 trace_xfs_iget_reclaim(ip);
230 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
231 * from stomping over us while we recycle the inode. We can't
232 * clear the radix tree reclaimable tag yet as it requires
233 * pag_ici_lock to be held exclusive.
235 ip->i_flags |= XFS_IRECLAIM;
237 spin_unlock(&ip->i_flags_lock);
238 rcu_read_unlock();
240 error = -inode_init_always(mp->m_super, inode);
241 if (error) {
243 * Re-initializing the inode failed, and we are in deep
244 * trouble. Try to re-add it to the reclaim list.
246 rcu_read_lock();
247 spin_lock(&ip->i_flags_lock);
249 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
250 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
251 trace_xfs_iget_reclaim_fail(ip);
252 goto out_error;
255 spin_lock(&pag->pag_ici_lock);
256 spin_lock(&ip->i_flags_lock);
259 * Clear the per-lifetime state in the inode as we are now
260 * effectively a new inode and need to return to the initial
261 * state before reuse occurs.
263 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
264 ip->i_flags |= XFS_INEW;
265 __xfs_inode_clear_reclaim_tag(mp, pag, ip);
266 inode->i_state = I_NEW;
268 ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
269 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
270 lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
271 &xfs_iolock_active, "xfs_iolock_active");
273 spin_unlock(&ip->i_flags_lock);
274 spin_unlock(&pag->pag_ici_lock);
275 } else {
276 /* If the VFS inode is being torn down, pause and try again. */
277 if (!igrab(inode)) {
278 trace_xfs_iget_skip(ip);
279 error = EAGAIN;
280 goto out_error;
283 /* We've got a live one. */
284 spin_unlock(&ip->i_flags_lock);
285 rcu_read_unlock();
286 trace_xfs_iget_hit(ip);
289 if (lock_flags != 0)
290 xfs_ilock(ip, lock_flags);
292 xfs_iflags_clear(ip, XFS_ISTALE);
293 XFS_STATS_INC(xs_ig_found);
295 return 0;
297 out_error:
298 spin_unlock(&ip->i_flags_lock);
299 rcu_read_unlock();
300 return error;
304 static int
305 xfs_iget_cache_miss(
306 struct xfs_mount *mp,
307 struct xfs_perag *pag,
308 xfs_trans_t *tp,
309 xfs_ino_t ino,
310 struct xfs_inode **ipp,
311 int flags,
312 int lock_flags)
314 struct xfs_inode *ip;
315 int error;
316 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
318 ip = xfs_inode_alloc(mp, ino);
319 if (!ip)
320 return ENOMEM;
322 error = xfs_iread(mp, tp, ip, flags);
323 if (error)
324 goto out_destroy;
326 trace_xfs_iget_miss(ip);
328 if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
329 error = ENOENT;
330 goto out_destroy;
334 * Preload the radix tree so we can insert safely under the
335 * write spinlock. Note that we cannot sleep inside the preload
336 * region.
338 if (radix_tree_preload(GFP_KERNEL)) {
339 error = EAGAIN;
340 goto out_destroy;
344 * Because the inode hasn't been added to the radix-tree yet it can't
345 * be found by another thread, so we can do the non-sleeping lock here.
347 if (lock_flags) {
348 if (!xfs_ilock_nowait(ip, lock_flags))
349 BUG();
353 * These values must be set before inserting the inode into the radix
354 * tree as the moment it is inserted a concurrent lookup (allowed by the
355 * RCU locking mechanism) can find it and that lookup must see that this
356 * is an inode currently under construction (i.e. that XFS_INEW is set).
357 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
358 * memory barrier that ensures this detection works correctly at lookup
359 * time.
361 ip->i_udquot = ip->i_gdquot = NULL;
362 xfs_iflags_set(ip, XFS_INEW);
364 /* insert the new inode */
365 spin_lock(&pag->pag_ici_lock);
366 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
367 if (unlikely(error)) {
368 WARN_ON(error != -EEXIST);
369 XFS_STATS_INC(xs_ig_dup);
370 error = EAGAIN;
371 goto out_preload_end;
373 spin_unlock(&pag->pag_ici_lock);
374 radix_tree_preload_end();
376 *ipp = ip;
377 return 0;
379 out_preload_end:
380 spin_unlock(&pag->pag_ici_lock);
381 radix_tree_preload_end();
382 if (lock_flags)
383 xfs_iunlock(ip, lock_flags);
384 out_destroy:
385 __destroy_inode(VFS_I(ip));
386 xfs_inode_free(ip);
387 return error;
391 * Look up an inode by number in the given file system.
392 * The inode is looked up in the cache held in each AG.
393 * If the inode is found in the cache, initialise the vfs inode
394 * if necessary.
396 * If it is not in core, read it in from the file system's device,
397 * add it to the cache and initialise the vfs inode.
399 * The inode is locked according to the value of the lock_flags parameter.
400 * This flag parameter indicates how and if the inode's IO lock and inode lock
401 * should be taken.
403 * mp -- the mount point structure for the current file system. It points
404 * to the inode hash table.
405 * tp -- a pointer to the current transaction if there is one. This is
406 * simply passed through to the xfs_iread() call.
407 * ino -- the number of the inode desired. This is the unique identifier
408 * within the file system for the inode being requested.
409 * lock_flags -- flags indicating how to lock the inode. See the comment
410 * for xfs_ilock() for a list of valid values.
413 xfs_iget(
414 xfs_mount_t *mp,
415 xfs_trans_t *tp,
416 xfs_ino_t ino,
417 uint flags,
418 uint lock_flags,
419 xfs_inode_t **ipp)
421 xfs_inode_t *ip;
422 int error;
423 xfs_perag_t *pag;
424 xfs_agino_t agino;
427 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
428 * doesn't get freed while it's being referenced during a
429 * radix tree traversal here. It assumes this function
430 * acquires only the ILOCK (and therefore it has no need to
431 * involve the IOLOCK in this synchronization).
433 ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
435 /* reject inode numbers outside existing AGs */
436 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
437 return EINVAL;
439 /* get the perag structure and ensure that it's inode capable */
440 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
441 agino = XFS_INO_TO_AGINO(mp, ino);
443 again:
444 error = 0;
445 rcu_read_lock();
446 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
448 if (ip) {
449 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
450 if (error)
451 goto out_error_or_again;
452 } else {
453 rcu_read_unlock();
454 XFS_STATS_INC(xs_ig_missed);
456 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
457 flags, lock_flags);
458 if (error)
459 goto out_error_or_again;
461 xfs_perag_put(pag);
463 *ipp = ip;
466 * If we have a real type for an on-disk inode, we can set ops(&unlock)
467 * now. If it's a new inode being created, xfs_ialloc will handle it.
469 if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
470 xfs_setup_inode(ip);
471 return 0;
473 out_error_or_again:
474 if (error == EAGAIN) {
475 delay(1);
476 goto again;
478 xfs_perag_put(pag);
479 return error;
480 }
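/*
 * Illustrative sketch, not part of the original file: a typical caller
 * pattern for xfs_iget() as described in the comment above. The helper
 * name and the choice of flags are hypothetical; IRELE() is assumed to
 * be XFS's usual wrapper for dropping the inode reference.
 */
static int example_iget_usage(struct xfs_mount *mp, xfs_ino_t ino)
{
	struct xfs_inode	*ip;
	int			error;

	/* look the inode up with no transaction, taking the inode lock shared */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
	if (error)
		return error;

	/* ... use ip while the inode lock is held ... */

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);		/* drop the reference taken by xfs_iget() */
	return 0;
}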
483 * This is a wrapper routine around the xfs_ilock() routine
484 * used to centralize some grungy code. It is used in places
485 * that wish to lock the inode solely for reading the extents.
486 * The reason these places can't just call xfs_ilock(SHARED)
487 * is that the inode lock also guards the bringing in of the
488 * extents from disk for a file in b-tree format. If the inode
489 * is in b-tree format, then we need to lock the inode exclusively
490 * until the extents are read in. Locking it exclusively all
491 * the time would limit our parallelism unnecessarily, though.
492 * What we do instead is check to see if the extents have been
493 * read in yet, and only lock the inode exclusively if they
494 * have not.
496 * The function returns a value which should be given to the
497 * corresponding xfs_iunlock_map_shared(). This value is
498 * the mode in which the lock was actually taken.
500 uint
501 xfs_ilock_map_shared(
502 xfs_inode_t *ip)
504 uint lock_mode;
506 if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
507 ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
508 lock_mode = XFS_ILOCK_EXCL;
509 } else {
510 lock_mode = XFS_ILOCK_SHARED;
513 xfs_ilock(ip, lock_mode);
515 return lock_mode;
519 * This is simply the unlock routine to go with xfs_ilock_map_shared().
520 * All it does is call xfs_iunlock() with the given lock_mode.
522 void
523 xfs_iunlock_map_shared(
524 xfs_inode_t *ip,
525 unsigned int lock_mode)
527 xfs_iunlock(ip, lock_mode);
528 }
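/*
 * Illustrative sketch, not part of the original file: the expected pairing
 * of xfs_ilock_map_shared() and xfs_iunlock_map_shared(). The caller does
 * not care whether the lock was taken shared or exclusive; it simply hands
 * the returned lock mode back when unlocking. The helper name is made up.
 */
static void example_map_shared_usage(struct xfs_inode *ip)
{
	uint	lock_mode;

	lock_mode = xfs_ilock_map_shared(ip);
	/* ... read the data fork extents here ... */
	xfs_iunlock_map_shared(ip, lock_mode);
}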
531 * The xfs inode contains 2 locks: a multi-reader lock called the
532 * i_iolock and a multi-reader lock called the i_lock. This routine
533 * allows either or both of the locks to be obtained.
535 * The 2 locks should always be ordered so that the IO lock is
536 * obtained first in order to prevent deadlock.
538 * ip -- the inode being locked
539 * lock_flags -- this parameter indicates the inode's locks
540 * to be locked. It can be:
541 * XFS_IOLOCK_SHARED,
542 * XFS_IOLOCK_EXCL,
543 * XFS_ILOCK_SHARED,
544 * XFS_ILOCK_EXCL,
545 * XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
546 * XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
547 * XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
548 * XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
550 void
551 xfs_ilock(
552 xfs_inode_t *ip,
553 uint lock_flags)
556 * You can't set both SHARED and EXCL for the same lock,
557 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
558 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
560 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
561 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
562 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
563 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
564 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
566 if (lock_flags & XFS_IOLOCK_EXCL)
567 mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
568 else if (lock_flags & XFS_IOLOCK_SHARED)
569 mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
571 if (lock_flags & XFS_ILOCK_EXCL)
572 mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
573 else if (lock_flags & XFS_ILOCK_SHARED)
574 mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
576 trace_xfs_ilock(ip, lock_flags, _RET_IP_);
577 }
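/*
 * Illustrative sketch, not part of the original file: taking and dropping
 * both inode locks with one of the flag combinations listed above. The IO
 * lock is always acquired before the inode lock to preserve the documented
 * lock ordering. The helper name is made up.
 */
static void example_ilock_usage(struct xfs_inode *ip)
{
	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	/* ... both locks are held exclusively here ... */
	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
}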
580 * This is just like xfs_ilock(), except that the caller
581 * is guaranteed not to sleep. It returns 1 if it gets
582 * the requested locks and 0 otherwise. If the IO lock is
583 * obtained but the inode lock cannot be, then the IO lock
584 * is dropped before returning.
586 * ip -- the inode being locked
587 * lock_flags -- this parameter indicates the inode's locks to be
588 * locked. See the comment for xfs_ilock() for a list
589 * of valid values.
592 xfs_ilock_nowait(
593 xfs_inode_t *ip,
594 uint lock_flags)
597 * You can't set both SHARED and EXCL for the same lock,
598 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
599 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
601 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
602 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
603 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
604 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
605 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
607 if (lock_flags & XFS_IOLOCK_EXCL) {
608 if (!mrtryupdate(&ip->i_iolock))
609 goto out;
610 } else if (lock_flags & XFS_IOLOCK_SHARED) {
611 if (!mrtryaccess(&ip->i_iolock))
612 goto out;
614 if (lock_flags & XFS_ILOCK_EXCL) {
615 if (!mrtryupdate(&ip->i_lock))
616 goto out_undo_iolock;
617 } else if (lock_flags & XFS_ILOCK_SHARED) {
618 if (!mrtryaccess(&ip->i_lock))
619 goto out_undo_iolock;
621 trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
622 return 1;
624 out_undo_iolock:
625 if (lock_flags & XFS_IOLOCK_EXCL)
626 mrunlock_excl(&ip->i_iolock);
627 else if (lock_flags & XFS_IOLOCK_SHARED)
628 mrunlock_shared(&ip->i_iolock);
629 out:
630 return 0;
631 }
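/*
 * Illustrative sketch, not part of the original file: the usual trylock
 * pattern for xfs_ilock_nowait(), backing off when the lock cannot be
 * taken without sleeping. The helper name and the EAGAIN return value
 * are hypothetical.
 */
static int example_ilock_nowait_usage(struct xfs_inode *ip)
{
	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return EAGAIN;		/* caller retries later */

	/* ... the inode lock is held exclusively here ... */

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}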
634 * xfs_iunlock() is used to drop the inode locks acquired with
635 * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
636 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
637 * that we know which locks to drop.
639 * ip -- the inode being unlocked
640 * lock_flags -- this parameter indicates the inode's locks to be
641 * unlocked. See the comment for xfs_ilock() for a list
642 * of valid values for this parameter.
645 void
646 xfs_iunlock(
647 xfs_inode_t *ip,
648 uint lock_flags)
651 * You can't set both SHARED and EXCL for the same lock,
652 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
653 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
655 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
656 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
657 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
658 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
659 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);
660 ASSERT(lock_flags != 0);
662 if (lock_flags & XFS_IOLOCK_EXCL)
663 mrunlock_excl(&ip->i_iolock);
664 else if (lock_flags & XFS_IOLOCK_SHARED)
665 mrunlock_shared(&ip->i_iolock);
667 if (lock_flags & XFS_ILOCK_EXCL)
668 mrunlock_excl(&ip->i_lock);
669 else if (lock_flags & XFS_ILOCK_SHARED)
670 mrunlock_shared(&ip->i_lock);
672 trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
675 /*
676 * Give up write locks. The i/o lock cannot be held nested
677 * if it is being demoted.
678 */
679 void
680 xfs_ilock_demote(
681 xfs_inode_t *ip,
682 uint lock_flags)
684 ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
685 ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
687 if (lock_flags & XFS_ILOCK_EXCL)
688 mrdemote(&ip->i_lock);
689 if (lock_flags & XFS_IOLOCK_EXCL)
690 mrdemote(&ip->i_iolock);
692 trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
693 }
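/*
 * Illustrative sketch, not part of the original file: taking the inode lock
 * exclusive for an update and then demoting it to shared so that readers can
 * proceed while the caller continues under the shared lock. The helper name
 * is made up.
 */
static void example_ilock_demote_usage(struct xfs_inode *ip)
{
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	/* ... perform the exclusive part of the update ... */
	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
	/* ... continue under the now-shared lock ... */
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
}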
695 #ifdef DEBUG
697 xfs_isilocked(
698 xfs_inode_t *ip,
699 uint lock_flags)
701 if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
702 if (!(lock_flags & XFS_ILOCK_SHARED))
703 return !!ip->i_lock.mr_writer;
704 return rwsem_is_locked(&ip->i_lock.mr_lock);
707 if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
708 if (!(lock_flags & XFS_IOLOCK_SHARED))
709 return !!ip->i_iolock.mr_writer;
710 return rwsem_is_locked(&ip->i_iolock.mr_lock);
713 ASSERT(0);
714 return 0;
716 #endif
718 void
719 __xfs_iflock(
720 struct xfs_inode *ip)
722 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
723 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
725 do {
726 prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
727 if (xfs_isiflocked(ip))
728 io_schedule();
729 } while (!xfs_iflock_nowait(ip));
731 finish_wait(wq, &wait.wait);