/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_types.h"
21 #include "xfs_acl.h"
22 #include "xfs_bit.h"
23 #include "xfs_log.h"
24 #include "xfs_inum.h"
25 #include "xfs_trans.h"
26 #include "xfs_sb.h"
27 #include "xfs_ag.h"
28 #include "xfs_mount.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_alloc_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_dinode.h"
33 #include "xfs_inode.h"
34 #include "xfs_btree.h"
35 #include "xfs_ialloc.h"
36 #include "xfs_quota.h"
37 #include "xfs_utils.h"
38 #include "xfs_trans_priv.h"
39 #include "xfs_inode_item.h"
40 #include "xfs_bmap.h"
41 #include "xfs_btree_trace.h"
42 #include "xfs_trace.h"
/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
        struct xfs_mount        *mp,
        xfs_ino_t               ino)
{
        struct xfs_inode        *ip;

        /*
         * if this didn't occur in transactions, we could use
         * KM_MAYFAIL and return NULL here on ENOMEM. Set the
         * code up to do this anyway.
         */
        ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
        if (!ip)
                return NULL;
        if (inode_init_always(mp->m_super, VFS_I(ip))) {
                kmem_zone_free(xfs_inode_zone, ip);
                return NULL;
        }

        ASSERT(atomic_read(&ip->i_iocount) == 0);
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(!spin_is_locked(&ip->i_flags_lock));
        ASSERT(completion_done(&ip->i_flush));

        mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

        /* initialise the xfs inode */
        ip->i_ino = ino;
        ip->i_mount = mp;
        memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
        ip->i_afp = NULL;
        memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
        ip->i_flags = 0;
        ip->i_update_core = 0;
        ip->i_delayed_blks = 0;
        memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
        ip->i_size = 0;
        ip->i_new_size = 0;

        /* prevent anyone from using this yet */
        VFS_I(ip)->i_state = I_NEW;

        return ip;
}
void
xfs_inode_free(
        struct xfs_inode        *ip)
{
        switch (ip->i_d.di_mode & S_IFMT) {
        case S_IFREG:
        case S_IFDIR:
        case S_IFLNK:
                xfs_idestroy_fork(ip, XFS_DATA_FORK);
                break;
        }

        if (ip->i_afp)
                xfs_idestroy_fork(ip, XFS_ATTR_FORK);

        if (ip->i_itemp) {
                /*
                 * Only if we are shutting down the fs will we see an
                 * inode still in the AIL. If it is there, we should remove
                 * it to prevent a use-after-free from occurring.
                 */
                xfs_log_item_t  *lip = &ip->i_itemp->ili_item;
                struct xfs_ail  *ailp = lip->li_ailp;

                ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
                                XFS_FORCED_SHUTDOWN(ip->i_mount));
                if (lip->li_flags & XFS_LI_IN_AIL) {
                        spin_lock(&ailp->xa_lock);
                        if (lip->li_flags & XFS_LI_IN_AIL)
                                xfs_trans_ail_delete(ailp, lip);
                        else
                                spin_unlock(&ailp->xa_lock);
                }
                xfs_inode_item_destroy(ip);
                ip->i_itemp = NULL;
        }

        /* asserts to verify all state is correct here */
        ASSERT(atomic_read(&ip->i_iocount) == 0);
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(!spin_is_locked(&ip->i_flags_lock));
        ASSERT(completion_done(&ip->i_flush));

        kmem_zone_free(xfs_inode_zone, ip);
}
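/*
 * Illustrative sketch, not part of the original file: xfs_inode_alloc()
 * and xfs_inode_free() are intended as a pair.  A caller that allocates
 * an inode and then fails before publishing it in the per-AG radix tree
 * is expected to unwind roughly like the error path of
 * xfs_iget_cache_miss() below:
 *
 *      ip = xfs_inode_alloc(mp, ino);
 *      if (!ip)
 *              return ENOMEM;
 *      if (error) {
 *              __destroy_inode(VFS_I(ip));
 *              xfs_inode_free(ip);
 *              return error;
 *      }
 */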
/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
        struct xfs_perag        *pag,
        struct xfs_inode        *ip,
        int                     flags,
        int                     lock_flags) __releases(pag->pag_ici_lock)
{
        struct inode            *inode = VFS_I(ip);
        struct xfs_mount        *mp = ip->i_mount;
        int                     error;

        spin_lock(&ip->i_flags_lock);

        if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
                trace_xfs_iget_skip(ip);
                XFS_STATS_INC(xs_ig_frecycle);
                error = EAGAIN;
                goto out_error;
        }

        /*
         * If lookup is racing with unlink return an error immediately.
         */
        if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
                error = ENOENT;
                goto out_error;
        }

        /*
         * If IRECLAIMABLE is set, we've torn down the VFS inode already.
         * Need to carefully get it back into usable state.
         */
        if (ip->i_flags & XFS_IRECLAIMABLE) {
                trace_xfs_iget_reclaim(ip);

                /*
                 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
                 * from stomping over us while we recycle the inode. We can't
                 * clear the radix tree reclaimable tag yet as it requires
                 * pag_ici_lock to be held exclusive.
                 */
                ip->i_flags |= XFS_IRECLAIM;

                spin_unlock(&ip->i_flags_lock);
                read_unlock(&pag->pag_ici_lock);

                error = -inode_init_always(mp->m_super, inode);
                if (error) {
                        /*
                         * Re-initializing the inode failed, and we are in deep
                         * trouble.  Try to re-add it to the reclaim list.
                         */
                        read_lock(&pag->pag_ici_lock);
                        spin_lock(&ip->i_flags_lock);

                        ip->i_flags &= ~XFS_INEW;
                        ip->i_flags |= XFS_IRECLAIMABLE;
                        __xfs_inode_set_reclaim_tag(pag, ip);
                        trace_xfs_iget_reclaim_fail(ip);
                        goto out_error;
                }

                write_lock(&pag->pag_ici_lock);
                spin_lock(&ip->i_flags_lock);
                ip->i_flags &= ~(XFS_IRECLAIMABLE | XFS_IRECLAIM);
                ip->i_flags |= XFS_INEW;
                __xfs_inode_clear_reclaim_tag(mp, pag, ip);
                inode->i_state = I_NEW;
                spin_unlock(&ip->i_flags_lock);
                write_unlock(&pag->pag_ici_lock);
        } else {
                /* If the VFS inode is being torn down, pause and try again. */
                if (!igrab(inode)) {
                        trace_xfs_iget_skip(ip);
                        error = EAGAIN;
                        goto out_error;
                }

                /* We've got a live one. */
                spin_unlock(&ip->i_flags_lock);
                read_unlock(&pag->pag_ici_lock);
                trace_xfs_iget_hit(ip);
        }

        if (lock_flags != 0)
                xfs_ilock(ip, lock_flags);

        xfs_iflags_clear(ip, XFS_ISTALE);
        XFS_STATS_INC(xs_ig_found);

        return 0;

out_error:
        spin_unlock(&ip->i_flags_lock);
        read_unlock(&pag->pag_ici_lock);
        return error;
}
static int
xfs_iget_cache_miss(
        struct xfs_mount        *mp,
        struct xfs_perag        *pag,
        xfs_trans_t             *tp,
        xfs_ino_t               ino,
        struct xfs_inode        **ipp,
        int                     flags,
        int                     lock_flags)
{
        struct xfs_inode        *ip;
        int                     error;
        xfs_agino_t             agino = XFS_INO_TO_AGINO(mp, ino);

        ip = xfs_inode_alloc(mp, ino);
        if (!ip)
                return ENOMEM;

        error = xfs_iread(mp, tp, ip, flags);
        if (error)
                goto out_destroy;

        trace_xfs_iget_miss(ip);

        if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
                error = ENOENT;
                goto out_destroy;
        }

        /*
         * Preload the radix tree so we can insert safely under the
         * write spinlock. Note that we cannot sleep inside the preload
         * region.
         */
        if (radix_tree_preload(GFP_KERNEL)) {
                error = EAGAIN;
                goto out_destroy;
        }

        /*
         * Because the inode hasn't been added to the radix-tree yet it can't
         * be found by another thread, so we can do the non-sleeping lock here.
         */
        if (lock_flags) {
                if (!xfs_ilock_nowait(ip, lock_flags))
                        BUG();
        }

        write_lock(&pag->pag_ici_lock);

        /* insert the new inode */
        error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
        if (unlikely(error)) {
                WARN_ON(error != -EEXIST);
                XFS_STATS_INC(xs_ig_dup);
                error = EAGAIN;
                goto out_preload_end;
        }

        /* These values _must_ be set before releasing the radix tree lock! */
        ip->i_udquot = ip->i_gdquot = NULL;
        xfs_iflags_set(ip, XFS_INEW);

        write_unlock(&pag->pag_ici_lock);
        radix_tree_preload_end();

        *ipp = ip;
        return 0;

out_preload_end:
        write_unlock(&pag->pag_ici_lock);
        radix_tree_preload_end();
        if (lock_flags)
                xfs_iunlock(ip, lock_flags);
out_destroy:
        __destroy_inode(VFS_I(ip));
        xfs_inode_free(ip);
        return error;
}
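/*
 * Illustrative sketch, not part of the original file: the preload/insert
 * sequence above is the standard pattern for populating a radix tree
 * under a non-sleeping lock.  radix_tree_preload() allocates tree nodes
 * up front (and may sleep with GFP_KERNEL), so the insert itself never
 * has to:
 *
 *      if (radix_tree_preload(GFP_KERNEL))
 *              return ENOMEM;          // or EAGAIN, as above
 *      spin_lock(&lock);
 *      error = radix_tree_insert(&root, index, item);
 *      spin_unlock(&lock);
 *      radix_tree_preload_end();       // re-enables preemption
 */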
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *               for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_ino_t       ino,
        uint            flags,
        uint            lock_flags,
        xfs_inode_t     **ipp)
{
        xfs_inode_t     *ip;
        int             error;
        xfs_perag_t     *pag;
        xfs_agino_t     agino;

        /* the radix tree exists only in inode capable AGs */
        if (XFS_INO_TO_AGNO(mp, ino) >= mp->m_maxagi)
                return EINVAL;

        /* get the perag structure and ensure that it's inode capable */
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
        agino = XFS_INO_TO_AGINO(mp, ino);

again:
        error = 0;
        read_lock(&pag->pag_ici_lock);
        ip = radix_tree_lookup(&pag->pag_ici_root, agino);

        if (ip) {
                error = xfs_iget_cache_hit(pag, ip, flags, lock_flags);
                if (error)
                        goto out_error_or_again;
        } else {
                read_unlock(&pag->pag_ici_lock);
                XFS_STATS_INC(xs_ig_missed);

                error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
                                                        flags, lock_flags);
                if (error)
                        goto out_error_or_again;
        }
        xfs_perag_put(pag);

        *ipp = ip;

        ASSERT(ip->i_df.if_ext_max ==
               XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
        /*
         * If we have a real type for an on-disk inode, we can set ops(&unlock)
         * now.  If it's a new inode being created, xfs_ialloc will handle it.
         */
        if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
                xfs_setup_inode(ip);
        return 0;

out_error_or_again:
        if (error == EAGAIN) {
                delay(1);
                goto again;
        }
        xfs_perag_put(pag);
        return error;
}
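/*
 * Illustrative sketch, not part of the original file: a typical
 * transaction-free lookup through xfs_iget().  In this era of the code
 * xfs_iget() returns positive errnos (XFS's internal convention, negated
 * at the VFS boundary) and retries EAGAIN internally via the "again"
 * label, so the caller only sees a final result:
 *
 *      struct xfs_inode *ip;
 *      int error;
 *
 *      error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *      if (error)
 *              return -error;          // convert to the VFS convention
 *      ...use ip...
 *      xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *      IRELE(ip);
 */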
/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
        xfs_inode_t     *ip)
{
        uint    lock_mode;

        if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
            ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
                lock_mode = XFS_ILOCK_EXCL;
        } else {
                lock_mode = XFS_ILOCK_SHARED;
        }

        xfs_ilock(ip, lock_mode);

        return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
        xfs_inode_t     *ip,
        unsigned int    lock_mode)
{
        xfs_iunlock(ip, lock_mode);
}
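/*
 * Illustrative sketch, not part of the original file: the two helpers
 * above are meant to bracket read-only extent walks.  The caller keeps
 * whatever mode was actually granted and hands it back on unlock:
 *
 *      uint lock_mode;
 *
 *      lock_mode = xfs_ilock_map_shared(ip);
 *      ...read the extent list...
 *      xfs_iunlock_map_shared(ip, lock_mode);
 */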
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *              XFS_IOLOCK_SHARED,
 *              XFS_IOLOCK_EXCL,
 *              XFS_ILOCK_SHARED,
 *              XFS_ILOCK_EXCL,
 *              XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *              XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *              XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *              XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

        if (lock_flags & XFS_IOLOCK_EXCL)
                mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
        else if (lock_flags & XFS_IOLOCK_SHARED)
                mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

        if (lock_flags & XFS_ILOCK_EXCL)
                mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
        else if (lock_flags & XFS_ILOCK_SHARED)
                mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

        trace_xfs_ilock(ip, lock_flags, _RET_IP_);
}
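/*
 * Illustrative sketch, not part of the original file: lock ordering as
 * described above -- when both locks are needed they are taken in a
 * single call, so the IO lock is always acquired first:
 *
 *      xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *      ...modify the file...
 *      xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *
 * Passing both modes of the same lock, e.g.
 * XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL, would trip the first ASSERT above.
 */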
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 */
int
xfs_ilock_nowait(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

        if (lock_flags & XFS_IOLOCK_EXCL) {
                if (!mrtryupdate(&ip->i_iolock))
                        goto out;
        } else if (lock_flags & XFS_IOLOCK_SHARED) {
                if (!mrtryaccess(&ip->i_iolock))
                        goto out;
        }
        if (lock_flags & XFS_ILOCK_EXCL) {
                if (!mrtryupdate(&ip->i_lock))
                        goto out_undo_iolock;
        } else if (lock_flags & XFS_ILOCK_SHARED) {
                if (!mrtryaccess(&ip->i_lock))
                        goto out_undo_iolock;
        }
        trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
        return 1;

out_undo_iolock:
        if (lock_flags & XFS_IOLOCK_EXCL)
                mrunlock_excl(&ip->i_iolock);
        else if (lock_flags & XFS_IOLOCK_SHARED)
                mrunlock_shared(&ip->i_iolock);
out:
        return 0;
}
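/*
 * Illustrative sketch, not part of the original file: a trylock caller
 * must be prepared to back off and retry, e.g.:
 *
 *      if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *              return EAGAIN;          // caller retries later
 *      ...
 *      xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *
 * xfs_iget_cache_miss() above relies on the opposite property: the new
 * inode is not yet visible to any other thread, so the trylock cannot
 * fail, and it BUG()s if it somehow does.
 */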
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 */
void
xfs_iunlock(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
                        XFS_LOCK_DEP_MASK)) == 0);
        ASSERT(lock_flags != 0);

        if (lock_flags & XFS_IOLOCK_EXCL)
                mrunlock_excl(&ip->i_iolock);
        else if (lock_flags & XFS_IOLOCK_SHARED)
                mrunlock_shared(&ip->i_iolock);

        if (lock_flags & XFS_ILOCK_EXCL)
                mrunlock_excl(&ip->i_lock);
        else if (lock_flags & XFS_ILOCK_SHARED)
                mrunlock_shared(&ip->i_lock);

        if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
            !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
                /*
                 * Let the AIL know that this item has been unlocked in case
                 * it is in the AIL and anyone is waiting on it.  Don't do
                 * this if the caller has asked us not to.
                 */
                xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
                                        (xfs_log_item_t*)(ip->i_itemp));
        }
        trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
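/*
 * Illustrative sketch, not part of the original file: XFS_IUNLOCK_NONOTIFY
 * suppresses the AIL notification above for callers that know nothing can
 * be waiting on the inode's log item, e.g.:
 *
 *      xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IUNLOCK_NONOTIFY);
 */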
/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

        if (lock_flags & XFS_ILOCK_EXCL)
                mrdemote(&ip->i_lock);
        if (lock_flags & XFS_IOLOCK_EXCL)
                mrdemote(&ip->i_iolock);

        trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
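/*
 * Illustrative sketch, not part of the original file: demotion turns an
 * exclusive hold into a shared one without ever dropping the lock, so
 * the eventual unlock must use the shared flag:
 *
 *      xfs_ilock(ip, XFS_ILOCK_EXCL);
 *      ...exclusive work...
 *      xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *      ...read-only work, other readers now admitted...
 *      xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */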
#ifdef DEBUG
int
xfs_isilocked(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
                if (!(lock_flags & XFS_ILOCK_SHARED))
                        return !!ip->i_lock.mr_writer;
                return rwsem_is_locked(&ip->i_lock.mr_lock);
        }

        if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
                if (!(lock_flags & XFS_IOLOCK_SHARED))
                        return !!ip->i_iolock.mr_writer;
                return rwsem_is_locked(&ip->i_iolock.mr_lock);
        }

        ASSERT(0);
        return 0;
}
#endif
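/*
 * Illustrative sketch, not part of the original file: xfs_isilocked() is
 * a DEBUG-only helper meant for locking assertions, e.g.:
 *
 *      ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 */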