/* fs/xfs/xfs_iget.c */
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_btree_trace.h"
#include "xfs_dir2_trace.h"
/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	/*
	 * initialise the VFS inode here to get failures
	 * out of the way early.
	 */
	if (!inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_update_size = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
	ip->i_size = 0;
	ip->i_new_size = 0;

	/*
	 * Initialize inode's trace buffers.
	 */
#ifdef XFS_INODE_TRACE
	ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BMAP_TRACE
	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_BTREE_TRACE
	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_RW_TRACE
	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_ILOCK_TRACE
	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
#endif
#ifdef XFS_DIR2_TRACE
	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
#endif

	return ip;
}
/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	int			error = EAGAIN;

	/*
	 * If INEW is set this inode is being set up
	 * If IRECLAIM is set this inode is being torn down
	 * Pause and try again.
	 */
	if (xfs_iflags_test(ip, (XFS_INEW|XFS_IRECLAIM))) {
		XFS_STATS_INC(xs_ig_frecycle);
		goto out_error;
	}

	/* If IRECLAIMABLE is set, we've torn down the vfs inode part */
	if (xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {

		/*
		 * If lookup is racing with unlink, then we should return an
		 * error immediately so we don't remove it from the reclaim
		 * list and potentially leak the inode.
		 */
		if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
			error = ENOENT;
			goto out_error;
		}

		xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

		/*
		 * We need to re-initialise the VFS inode as it has been
		 * 'freed' by the VFS. Do this here so we can deal with
		 * errors cleanly, then tag it so it can be set up correctly
		 * later.
		 */
		if (!inode_init_always(mp->m_super, VFS_I(ip))) {
			error = ENOMEM;
			goto out_error;
		}

		/*
		 * We must set the XFS_INEW flag before clearing the
		 * XFS_IRECLAIMABLE flag so that if a racing lookup does
		 * not find the XFS_IRECLAIMABLE above but has the igrab()
		 * below succeed we can safely check XFS_INEW to detect
		 * that this inode is still being initialised.
		 */
		xfs_iflags_set(ip, XFS_INEW);
		xfs_iflags_clear(ip, XFS_IRECLAIMABLE);

		/* clear the radix tree reclaim flag as well. */
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
	} else if (!igrab(VFS_I(ip))) {
		/* If the VFS inode is being torn down, pause and try again. */
		XFS_STATS_INC(xs_ig_frecycle);
		goto out_error;
	} else if (xfs_iflags_test(ip, XFS_INEW)) {
		/*
		 * We are racing with another cache hit that is
		 * currently recycling this inode out of the XFS_IRECLAIMABLE
		 * state. Wait for the initialisation to complete before
		 * continuing.
		 */
		wait_on_inode(VFS_I(ip));
	}

	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		iput(VFS_I(ip));
		goto out_error;
	}

	/* We've got a live one. */
	read_unlock(&pag->pag_ici_lock);

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	xfs_itrace_exit_tag(ip, "xfs_iget.found");
	XFS_STATS_INC(xs_ig_found);
	return 0;

out_error:
	read_unlock(&pag->pag_ici_lock);
	return error;
}
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	xfs_daddr_t		bno,
	int			flags,
	int			lock_flags) __releases(pag->pag_ici_lock)
{
	struct xfs_inode	*ip;
	int			error;
	unsigned long		first_index, mask;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, bno, flags);
	if (error)
		goto out_destroy;

	xfs_itrace_exit_tag(ip, "xfs_iget.alloc");

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	if (lock_flags)
		xfs_ilock(ip, lock_flags);

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_unlock;
	}

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = agino & mask;
	write_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	*ipp = ip;
	return 0;

out_preload_end:
	write_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
out_unlock:
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	xfs_destroy_inode(ip);
	return error;
}
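
/*
 * Illustrative sketch (compiled out): the preload/insert pattern used in
 * xfs_iget_cache_miss() above, shown in isolation.  The function, tree,
 * lock and item names here are placeholders, not part of this file.
 * radix_tree_preload() runs while we may still sleep; the insert itself
 * happens under the non-sleeping write lock.
 */
#if 0
static int
example_radix_insert(
	struct radix_tree_root	*root,
	rwlock_t		*lock,
	unsigned long		index,
	void			*item)
{
	int	error;

	/* preload while sleeping is still allowed ... */
	if (radix_tree_preload(GFP_KERNEL))
		return -ENOMEM;

	/* ... then insert atomically under the write lock */
	write_lock(lock);
	error = radix_tree_insert(root, index, item);
	write_unlock(lock);
	radix_tree_preload_end();
	return error;
}
#endif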
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *	  if known (as by bulkstat), else 0.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* the radix tree exists only in inode capable AGs */
	if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_get_perag(mp, ino);
	if (!pag->pagi_inodeok)
		return EINVAL;
	ASSERT(pag->pag_ici_init);
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		read_unlock(&pag->pag_ici_lock);
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, bno,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_put_perag(mp, pag);

	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_put_perag(mp, pag);
	return error;
}
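
/*
 * Illustrative sketch (compiled out): a minimal xfs_iget() caller.  It
 * looks the inode up with the inode lock held shared and drops both the
 * lock and the reference with xfs_iput() (defined below).  The function
 * name is a placeholder and error handling is simplified.
 */
#if 0
STATIC int
example_iget_caller(
	xfs_mount_t	*mp,
	xfs_ino_t	ino)
{
	xfs_inode_t	*ip;
	int		error;

	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
	if (error)
		return error;	/* positive errno, per XFS convention */

	/* ... use the locked inode ... */

	xfs_iput(ip, XFS_ILOCK_SHARED);	/* unlock and drop the reference */
	return 0;
}
#endif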
/*
 * Look for the inode corresponding to the given ino in the hash table.
 * If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_t *
xfs_inode_incore(xfs_mount_t	*mp,
		 xfs_ino_t	ino,
		 xfs_trans_t	*tp)
{
	xfs_inode_t	*ip;
	xfs_perag_t	*pag;

	pag = xfs_get_perag(mp, ino);
	read_lock(&pag->pag_ici_lock);
	ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ino));
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/* the returned inode must match the transaction */
	if (ip && (ip->i_transp != tp))
		return NULL;
	return ip;
}
/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 released.  See the comment on xfs_iunlock() for a list
 *	 of valid values.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	xfs_itrace_entry(ip);
	xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}
/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	struct inode	*inode = VFS_I(ip);

	xfs_itrace_entry(ip);

	if (ip->i_d.di_mode == 0) {
		ASSERT(!xfs_iflags_test(ip, XFS_IRECLAIMABLE));
		make_bad_inode(inode);
	}
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	IRELE(ip);
}
/*
 * This is called to free all the memory associated with an inode.
 * It must free the inode itself and any buffers allocated for
 * if_extents/if_data and if_broot.  It must also free the lock
 * associated with the inode.
 *
 * Note: because we don't initialise everything on reallocation out
 * of the zone, we must ensure we nullify everything correctly before
 * freeing the structure.
 */
void
xfs_ireclaim(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	XFS_STATS_INC(xs_ig_reclaims);

	/*
	 * Remove the inode from the per-AG radix tree.  It doesn't matter
	 * if it was never added to it because radix_tree_delete can deal
	 * with that case just fine.
	 */
	pag = xfs_get_perag(mp, ip->i_ino);
	write_lock(&pag->pag_ici_lock);
	radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock one but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Release dquots (and their references) if any.
	 */
	XFS_QM_DQDETACH(ip->i_mount, ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

#ifdef XFS_INODE_TRACE
	ktrace_free(ip->i_trace);
#endif
#ifdef XFS_BMAP_TRACE
	ktrace_free(ip->i_xtrace);
#endif
#ifdef XFS_BTREE_TRACE
	ktrace_free(ip->i_btrace);
#endif
#ifdef XFS_RW_TRACE
	ktrace_free(ip->i_rwtrace);
#endif
#ifdef XFS_ILOCK_TRACE
	ktrace_free(ip->i_lock_trace);
#endif
#ifdef XFS_DIR2_TRACE
	ktrace_free(ip->i_dir_trace);
#endif
	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL.  If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
				       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	kmem_zone_free(xfs_inode_zone, ip);
}
/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}
/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
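
/*
 * Illustrative sketch (compiled out): the intended pairing of the two
 * helpers above.  Whatever mode xfs_ilock_map_shared() actually took is
 * handed straight back to xfs_iunlock_map_shared().  The function name
 * is a placeholder.
 */
#if 0
STATIC void
example_read_extents(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	lock_mode = xfs_ilock_map_shared(ip);
	/* ... read the extent list; it is guaranteed to be in core here ... */
	xfs_iunlock_map_shared(ip, lock_mode);
}
#endif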
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}
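
/*
 * Illustrative sketch (compiled out): taking both locks in one call, in
 * the required IO-lock-before-inode-lock order, and releasing them with
 * the same flags.  The function name is a placeholder.
 */
#if 0
STATIC void
example_ilock_both(
	xfs_inode_t	*ip)
{
	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
	/* ... modify the inode ... */
	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
}
#endif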
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;

out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
out:
	return 0;
}
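
/*
 * Illustrative sketch (compiled out): the trylock pattern.  A caller
 * that must not block gives up and retries later when
 * xfs_ilock_nowait() cannot get the lock.  The function name is a
 * placeholder.
 */
#if 0
STATIC int
example_try_ilock(
	xfs_inode_t	*ip)
{
	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return EAGAIN;	/* caller retries later */

	/* ... work on the exclusively locked inode ... */

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}
#endif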
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t*)(ip->i_itemp));
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}
/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);
}
#ifdef DEBUG
/*
 * Debug-only routine: without additional rw_semaphore APIs, we can
 * only answer requests regarding whether we hold the lock for write
 * (reader state is outside our visibility, we only track writer state).
 *
 * Note: this means !xfs_isilocked would give false positives, so don't do that.
 */
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if ((lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) ==
			XFS_ILOCK_EXCL) {
		if (!ip->i_lock.mr_writer)
			return 0;
	}

	if ((lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) ==
			XFS_IOLOCK_EXCL) {
		if (!ip->i_iolock.mr_writer)
			return 0;
	}

	return 1;
}
#endif
#ifdef	XFS_INODE_TRACE

#define KTRACE_ENTER(ip, vk, s, line, ra)			\
	ktrace_enter((ip)->i_trace,				\
/* 0 */		(void *)(__psint_t)(vk),			\
/* 1 */		(void *)(s),					\
/* 2 */		(void *)(__psint_t) line,			\
/* 3 */		(void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \
/* 4 */		(void *)(ra),					\
/* 5 */		NULL,						\
/* 6 */		(void *)(__psint_t)current_cpu(),		\
/* 7 */		(void *)(__psint_t)current_pid(),		\
/* 8 */		(void *)__return_address,			\
/* 9 */		NULL, NULL, NULL, NULL, NULL, NULL, NULL)

/*
 * Vnode tracing code.
 */
void
_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
}

void
_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
}

void
xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
}

void
_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
}

void
xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
{
	KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
}
#endif	/* XFS_INODE_TRACE */