/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Further, this software is distributed without any warranty that it is
 * free of the rightful claim of any third person regarding infringement
 * or the like.  Any license provided herein, whether implied or
 * otherwise, applies only to this software file.  Patent licenses, if
 * any, provided herein do not apply to combinations of this program with
 * other software, or any other product whatsoever.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 * Contact information: Silicon Graphics, Inc., 1600 Amphitheatre Pkwy,
 * Mountain View, CA 94043, or:
 *
 * http://www.sgi.com
 *
 * For further information regarding this notice, see:
 *
 * http://oss.sgi.com/projects/GenInfo/SGIGPLNoticeExplan/
 */
#include "xfs.h"
#include "xfs_macros.h"
#include "xfs_types.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_alloc_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_attr_sf.h"
#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_utils.h"

/*
 * Initialize the inode hash table for the newly mounted file system.
 * Choose an initial table size based on user specified value, else
 * use a simple algorithm using the maximum number of inodes as an
 * indicator for table size, and clamp it between one and some large
 * number of pages.
 */
void
xfs_ihash_init(xfs_mount_t *mp)
{
	__uint64_t	icount;
	uint		i, flags = KM_SLEEP | KM_MAYFAIL;

	if (!mp->m_ihsize) {
		icount = mp->m_maxicount ? mp->m_maxicount :
			 (mp->m_sb.sb_dblocks << mp->m_sb.sb_inopblog);
		mp->m_ihsize = 1 << max_t(uint, 8,
					(xfs_highbit64(icount) + 1) / 2);
		mp->m_ihsize = min_t(uint, mp->m_ihsize,
					(64 * NBPP) / sizeof(xfs_ihash_t));
	}

	while (!(mp->m_ihash = (xfs_ihash_t *)kmem_zalloc(mp->m_ihsize *
						sizeof(xfs_ihash_t), flags))) {
		if ((mp->m_ihsize >>= 1) <= NBPP)
			flags = KM_SLEEP;
	}
	for (i = 0; i < mp->m_ihsize; i++) {
		rwlock_init(&(mp->m_ihash[i].ih_lock));
	}
}
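
/*
 * Worked example of the sizing heuristic above (illustrative only): with
 * no user-specified size and a maximum inode count around 2^24,
 * xfs_highbit64(icount) is 24, so the table gets
 * 1 << max(8, (24 + 1) / 2) = 2^12 = 4096 buckets.  Tiny filesystems are
 * held to the 1 << 8 = 256 bucket floor, and the min_t() clamp caps the
 * table at 64 pages worth of xfs_ihash_t entries.
 */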

/*
 * Free up structures allocated by xfs_ihash_init, at unmount time.
 */
void
xfs_ihash_free(xfs_mount_t *mp)
{
	kmem_free(mp->m_ihash, mp->m_ihsize * sizeof(xfs_ihash_t));
	mp->m_ihash = NULL;
}

/*
 * Initialize the inode cluster hash table for the newly mounted file system.
 * Its size is derived from the ihash table size.
 */
void
xfs_chash_init(xfs_mount_t *mp)
{
	uint	i;

	mp->m_chsize = max_t(uint, 1, mp->m_ihsize /
			 (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog));
	mp->m_chsize = min_t(uint, mp->m_chsize, mp->m_ihsize);
	mp->m_chash = (xfs_chash_t *)kmem_zalloc(mp->m_chsize
						 * sizeof(xfs_chash_t),
						 KM_SLEEP);
	for (i = 0; i < mp->m_chsize; i++) {
		spinlock_init(&mp->m_chash[i].ch_lock, "xfshash");
	}
}

/*
 * Free up structures allocated by xfs_chash_init, at unmount time.
 */
void
xfs_chash_free(xfs_mount_t *mp)
{
	uint	i;

	for (i = 0; i < mp->m_chsize; i++) {
		spinlock_destroy(&mp->m_chash[i].ch_lock);
	}

	kmem_free(mp->m_chash, mp->m_chsize * sizeof(xfs_chash_t));
	mp->m_chash = NULL;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the hash table for the file system
 * represented by the mount point parameter mp.  Each bucket of
 * the hash table is guarded by an individual semaphore.
 *
 * If the inode is found in the hash table, its corresponding vnode
 * is obtained with a call to vn_get().  This call takes care of
 * coordination with the reclamation of the inode and vnode.  Note
 * that the vmap structure is filled in while holding the hash lock.
 * This gives us the state of the inode/vnode when we found it and
 * is used for coordination in vn_get().
 *
 * If it is not in core, read it in from the file system's device and
 * add the inode into the hash table.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *               for xfs_ilock() for a list of valid values.
 * bno -- the block number starting the buffer containing the inode,
 *        if known (as by bulkstat), else 0.
 */
STATIC int
xfs_iget_core(
	vnode_t		*vp,
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	xfs_ihash_t	*ih;
	xfs_inode_t	*ip;
	xfs_inode_t	*iq;
	vnode_t		*inode_vp;
	ulong		version;
	int		error;
	xfs_chash_t	*ch;
	xfs_chashlist_t	*chl, *chlnew;
	SPLDECL(s);

	ih = XFS_IHASH(mp, ino);

again:
	read_lock(&ih->ih_lock);

	for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
		if (ip->i_ino == ino) {
			/*
			 * If INEW is set this inode is being set up;
			 * we need to pause and try again.
			 */
			if (ip->i_flags & XFS_INEW) {
				read_unlock(&ih->ih_lock);
				delay(1);
				XFS_STATS_INC(xs_ig_frecycle);

				goto again;
			}

			inode_vp = XFS_ITOV_NULL(ip);
			if (inode_vp == NULL) {
				/*
				 * If IRECLAIM is set this inode is
				 * on its way out of the system;
				 * we need to pause and try again.
				 */
				if (ip->i_flags & XFS_IRECLAIM) {
					read_unlock(&ih->ih_lock);
					delay(1);
					XFS_STATS_INC(xs_ig_frecycle);

					goto again;
				}

				vn_trace_exit(vp, "xfs_iget.alloc",
					(inst_t *)__return_address);

				XFS_STATS_INC(xs_ig_found);

				ip->i_flags &= ~XFS_IRECLAIMABLE;
				read_unlock(&ih->ih_lock);

				XFS_MOUNT_ILOCK(mp);
				list_del_init(&ip->i_reclaim);
				XFS_MOUNT_IUNLOCK(mp);

				goto finish_inode;

			} else if (vp != inode_vp) {
				struct inode *inode = LINVFS_GET_IP(inode_vp);

				/* The inode is being torn down, pause and
				 * try again.
				 */
				if (inode->i_state & (I_FREEING | I_CLEAR)) {
					read_unlock(&ih->ih_lock);
					delay(1);
					XFS_STATS_INC(xs_ig_frecycle);

					goto again;
				}
/* Chances are the other vnode (the one in the inode) is being torn
 * down right now, and we landed on top of it. Question is, what do
 * we do? Unhook the old inode and hook up the new one?
 */
				cmn_err(CE_PANIC,
			"xfs_iget_core: ambiguous vns: vp/0x%p, invp/0x%p",
						inode_vp, vp);
			}

			read_unlock(&ih->ih_lock);

			XFS_STATS_INC(xs_ig_found);

finish_inode:
			if (ip->i_d.di_mode == 0) {
				if (!(flags & IGET_CREATE))
					return ENOENT;
				xfs_iocore_inode_reinit(ip);
			}

			if (lock_flags != 0)
				xfs_ilock(ip, lock_flags);

			ip->i_flags &= ~XFS_ISTALE;

			vn_trace_exit(vp, "xfs_iget.found",
						(inst_t *)__return_address);
			goto return_ip;
		}
	}

	/*
	 * Inode cache miss: save the hash chain version stamp and unlock
	 * the chain, so we don't deadlock in vn_alloc.
	 */
	XFS_STATS_INC(xs_ig_missed);

	version = ih->ih_version;

	read_unlock(&ih->ih_lock);

	/*
	 * Read the disk inode attributes into a new inode structure and get
	 * a new vnode for it. This should also initialize i_ino and i_mount.
	 */
	error = xfs_iread(mp, tp, ino, &ip, bno);
	if (error)
		return error;

	vn_trace_exit(vp, "xfs_iget.alloc", (inst_t *)__return_address);

	xfs_inode_lock_init(ip, vp);
	xfs_iocore_inode_init(ip);

	if (lock_flags != 0) {
		xfs_ilock(ip, lock_flags);
	}

	if ((ip->i_d.di_mode == 0) && !(flags & IGET_CREATE)) {
		xfs_idestroy(ip);
		return ENOENT;
	}

	/*
	 * Put ip on its hash chain, unless someone else hashed a duplicate
	 * after we released the hash lock.
	 */
	write_lock(&ih->ih_lock);

	if (ih->ih_version != version) {
		for (iq = ih->ih_next; iq != NULL; iq = iq->i_next) {
			if (iq->i_ino == ino) {
				write_unlock(&ih->ih_lock);
				xfs_idestroy(ip);

				XFS_STATS_INC(xs_ig_dup);
				goto again;
			}
		}
	}

	/*
	 * These values _must_ be set before releasing ihlock!
	 */
	ip->i_hash = ih;
	if ((iq = ih->ih_next)) {
		iq->i_prevp = &ip->i_next;
	}
	ip->i_next = iq;
	ip->i_prevp = &ih->ih_next;
	ih->ih_next = ip;
	ip->i_udquot = ip->i_gdquot = NULL;
	ih->ih_version++;
	ip->i_flags |= XFS_INEW;

	write_unlock(&ih->ih_lock);

	/*
	 * put ip on its cluster's hash chain
	 */
	ASSERT(ip->i_chash == NULL && ip->i_cprev == NULL &&
	       ip->i_cnext == NULL);

	chlnew = NULL;
	ch = XFS_CHASH(mp, ip->i_blkno);
 chlredo:
	s = mutex_spinlock(&ch->ch_lock);
	for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) {
		if (chl->chl_blkno == ip->i_blkno) {

			/* insert this inode into the doubly-linked list
			 * where chl points */
			if ((iq = chl->chl_ip)) {
				ip->i_cprev = iq->i_cprev;
				iq->i_cprev->i_cnext = ip;
				iq->i_cprev = ip;
				ip->i_cnext = iq;
			} else {
				ip->i_cnext = ip;
				ip->i_cprev = ip;
			}
			chl->chl_ip = ip;
			ip->i_chash = chl;
			break;
		}
	}

	/* no hash list found for this block; add a new hash list */
	if (chl == NULL) {
		if (chlnew == NULL) {
			mutex_spinunlock(&ch->ch_lock, s);
			ASSERT(xfs_chashlist_zone != NULL);
			chlnew = (xfs_chashlist_t *)
					kmem_zone_alloc(xfs_chashlist_zone,
						KM_SLEEP);
			ASSERT(chlnew != NULL);
			goto chlredo;
		} else {
			ip->i_cnext = ip;
			ip->i_cprev = ip;
			ip->i_chash = chlnew;
			chlnew->chl_ip = ip;
			chlnew->chl_blkno = ip->i_blkno;
			chlnew->chl_next = ch->ch_list;
			ch->ch_list = chlnew;
			chlnew = NULL;
		}
	} else {
		if (chlnew != NULL) {
			kmem_zone_free(xfs_chashlist_zone, chlnew);
		}
	}

	mutex_spinunlock(&ch->ch_lock, s);

	/*
	 * Link ip to its mount and thread it on the mount's inode list.
	 */
	XFS_MOUNT_ILOCK(mp);
	if ((iq = mp->m_inodes)) {
		ASSERT(iq->i_mprev->i_mnext == iq);
		ip->i_mprev = iq->i_mprev;
		iq->i_mprev->i_mnext = ip;
		iq->i_mprev = ip;
		ip->i_mnext = iq;
	} else {
		ip->i_mnext = ip;
		ip->i_mprev = ip;
	}
	mp->m_inodes = ip;

	XFS_MOUNT_IUNLOCK(mp);

return_ip:
	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));

	ASSERT(((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) != 0) ==
	       ((ip->i_iocore.io_flags & XFS_IOCORE_RT) != 0));

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	VFS_INIT_VNODE(XFS_MTOVFS(mp), vp, XFS_ITOBHV(ip), 1);

	return 0;
}

/*
 * The 'normal' internal xfs_iget; if needed it will
 * 'allocate', or 'get', the vnode.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp,
	xfs_daddr_t	bno)
{
	struct inode	*inode;
	vnode_t		*vp = NULL;
	int		error;

retry:
	XFS_STATS_INC(xs_ig_attempts);

	if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) {
		bhv_desc_t	*bdp;
		xfs_inode_t	*ip;
		int		newnode;

		vp = LINVFS_GET_VP(inode);
		if (inode->i_state & I_NEW) {
inode_allocate:
			vn_initialize(inode);
			error = xfs_iget_core(vp, mp, tp, ino, flags,
					lock_flags, ipp, bno);
			if (error) {
				vn_mark_bad(vp);
				if (inode->i_state & I_NEW)
					unlock_new_inode(inode);
				iput(inode);
			}
		} else {
			/* These are true if the inode is in inactive or
			 * reclaim. The linux inode is about to go away,
			 * wait for that path to finish, and try again.
			 */
			if (vp->v_flag & (VINACT | VRECLM)) {
				vn_wait(vp);
				iput(inode);
				goto retry;
			}

			if (is_bad_inode(inode)) {
				iput(inode);
				return EIO;
			}

			bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
			if (bdp == NULL) {
				XFS_STATS_INC(xs_ig_dup);
				goto inode_allocate;
			}
			ip = XFS_BHVTOI(bdp);
			if (lock_flags != 0)
				xfs_ilock(ip, lock_flags);
			newnode = (ip->i_d.di_mode == 0);
			if (newnode)
				xfs_iocore_inode_reinit(ip);
			XFS_STATS_INC(xs_ig_found);
			*ipp = ip;
			error = 0;
		}
	} else
		error = ENOMEM;	/* If we got no inode we are out of memory */

	return error;
}
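
/*
 * Example (hypothetical caller, not part of this file): the usual
 * lookup/release pairing around xfs_iget()/xfs_iput().  Error handling
 * beyond the lookup itself is elided.
 *
 *	xfs_inode_t	*ip;
 *	int		error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
 *	if (!error) {
 *		...examine ip->i_d...
 *		xfs_iput(ip, XFS_ILOCK_SHARED);	(drops lock and vnode ref)
 *	}
 */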

/*
 * Do the setup for the various locks within the incore inode.
 */
void
xfs_inode_lock_init(
	xfs_inode_t	*ip,
	vnode_t		*vp)
{
	mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
		     "xfsino", (long)vp->v_number);
	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", vp->v_number);
	init_waitqueue_head(&ip->i_ipin_wait);
	atomic_set(&ip->i_pincount, 0);
	init_sema(&ip->i_flock, 1, "xfsfino", vp->v_number);
}

/*
 * Look for the inode corresponding to the given ino in the hash table.
 * If it is there and its i_transp pointer matches tp, return it.
 * Otherwise, return NULL.
 */
xfs_inode_t *
xfs_inode_incore(xfs_mount_t	*mp,
		 xfs_ino_t	ino,
		 xfs_trans_t	*tp)
{
	xfs_ihash_t	*ih;
	xfs_inode_t	*ip;

	ih = XFS_IHASH(mp, ino);
	read_lock(&ih->ih_lock);
	for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
		if (ip->i_ino == ino) {
			/*
			 * If we find it and tp matches, return it.
			 * Otherwise break from the loop and return
			 * NULL.
			 */
			if (ip->i_transp == tp) {
				read_unlock(&ih->ih_lock);
				return (ip);
			}
			break;
		}
	}
	read_unlock(&ih->ih_lock);
	return (NULL);
}
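
/*
 * Example sketch (hypothetical caller): transaction code can use this
 * routine to avoid re-reading an inode it has already joined to the
 * transaction:
 *
 *	xfs_inode_t	*ip;
 *
 *	ip = xfs_inode_incore(mp, ino, tp);
 *	if (ip == NULL)
 *		error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip, 0);
 */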

/*
 * Decrement reference count of an inode structure and unlock it.
 *
 * ip -- the inode being released
 * lock_flags -- this parameter indicates which of the inode's locks
 *	 are to be released.  See the comment on xfs_iunlock() for a list
 *	 of valid values.
 */
void
xfs_iput(xfs_inode_t	*ip,
	 uint		lock_flags)
{
	vnode_t	*vp = XFS_ITOV(ip);

	vn_trace_entry(vp, "xfs_iput", (inst_t *)__return_address);

	xfs_iunlock(ip, lock_flags);

	VN_RELE(vp);
}

/*
 * Special iput for brand-new inodes that are still locked
 */
void
xfs_iput_new(xfs_inode_t	*ip,
	     uint		lock_flags)
{
	vnode_t		*vp = XFS_ITOV(ip);
	struct inode	*inode = LINVFS_GET_IP(vp);

	vn_trace_entry(vp, "xfs_iput_new", (inst_t *)__return_address);

	if (ip->i_d.di_mode == 0) {
		ASSERT(!(ip->i_flags & XFS_IRECLAIMABLE));
		vn_mark_bad(vp);
	}
	if (inode->i_state & I_NEW)
		unlock_new_inode(inode);
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
	VN_RELE(vp);
}

/*
 * This routine embodies the part of the reclaim code that pulls
 * the inode from the inode hash table and the mount structure's
 * inode list.
 * This should only be called from xfs_reclaim().
 */
void
xfs_ireclaim(xfs_inode_t *ip)
{
	vnode_t		*vp;

	/*
	 * Remove from old hash list and mount list.
	 */
	XFS_STATS_INC(xs_ig_reclaims);

	xfs_iextract(ip);

	/*
	 * Here we do a spurious inode lock in order to coordinate with
	 * xfs_sync().  This is because xfs_sync() references the inodes
	 * in the mount list without taking references on the corresponding
	 * vnodes.  We make that OK here by ensuring that we wait until
	 * the inode is unlocked in xfs_sync() before we go ahead and
	 * free it.  We get both the regular lock and the io lock because
	 * the xfs_sync() code may need to drop the regular one but will
	 * still hold the io lock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	/*
	 * Release dquots (and their references) if any. An inode may escape
	 * xfs_inactive and get here via vn_alloc->vn_reclaim path.
	 */
	XFS_QM_DQDETACH(ip->i_mount, ip);

	/*
	 * Pull our behavior descriptor from the vnode chain.
	 */
	vp = XFS_ITOV_NULL(ip);
	if (vp) {
		vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip));
	}

	/*
	 * Free all memory associated with the inode.
	 */
	xfs_idestroy(ip);
}

/*
 * This routine removes an about-to-be-destroyed inode from
 * all of the lists in which it is located with the exception
 * of the behavior chain.
 */
void
xfs_iextract(
	xfs_inode_t	*ip)
{
	xfs_ihash_t	*ih;
	xfs_inode_t	*iq;
	xfs_mount_t	*mp;
	xfs_chash_t	*ch;
	xfs_chashlist_t	*chl, *chm;
	SPLDECL(s);

	ih = ip->i_hash;
	write_lock(&ih->ih_lock);
	if ((iq = ip->i_next)) {
		iq->i_prevp = ip->i_prevp;
	}
	*ip->i_prevp = iq;
	write_unlock(&ih->ih_lock);

	/*
	 * Remove from cluster hash list
	 *   1) delete the chashlist if this is the last inode on the chashlist
	 *   2) unchain from list of inodes
	 *   3) point chashlist->chl_ip at 'chl_next' if it points to this inode.
	 */
	mp = ip->i_mount;
	ch = XFS_CHASH(mp, ip->i_blkno);
	s = mutex_spinlock(&ch->ch_lock);

	if (ip->i_cnext == ip) {
		/* Last inode on chashlist */
		ASSERT(ip->i_cnext == ip && ip->i_cprev == ip);
		ASSERT(ip->i_chash != NULL);
		chm = NULL;
		for (chl = ch->ch_list; chl != NULL; chl = chl->chl_next) {
			if (chl->chl_blkno == ip->i_blkno) {
				if (chm == NULL) {
					/* first item on the list */
					ch->ch_list = chl->chl_next;
				} else {
					chm->chl_next = chl->chl_next;
				}
				kmem_zone_free(xfs_chashlist_zone, chl);
				break;
			} else {
				ASSERT(chl->chl_ip != ip);
				chm = chl;
			}
		}
		ASSERT_ALWAYS(chl != NULL);
	} else {
		/* delete one inode from a non-empty list */
		iq = ip->i_cnext;
		iq->i_cprev = ip->i_cprev;
		ip->i_cprev->i_cnext = iq;
		if (ip->i_chash->chl_ip == ip) {
			ip->i_chash->chl_ip = iq;
		}
		ip->i_chash = __return_address;
		ip->i_cprev = __return_address;
		ip->i_cnext = __return_address;
	}
	mutex_spinunlock(&ch->ch_lock, s);

	/*
	 * Remove from mount's inode list.
	 */
	XFS_MOUNT_ILOCK(mp);
	ASSERT((ip->i_mnext != NULL) && (ip->i_mprev != NULL));
	iq = ip->i_mnext;
	iq->i_mprev = ip->i_mprev;
	ip->i_mprev->i_mnext = iq;

	/*
	 * Fix up the head pointer if it points to the inode being deleted.
	 */
	if (mp->m_inodes == ip) {
		if (ip == iq) {
			mp->m_inodes = NULL;
		} else {
			mp->m_inodes = iq;
		}
	}

	/* Deal with the deleted inodes list */
	list_del_init(&ip->i_reclaim);

	mp->m_ireclaims++;
	XFS_MOUNT_IUNLOCK(mp);
}

/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}

/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
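
/*
 * Example (hypothetical caller): the pair above brackets code that only
 * needs to read the extent list, taking the exclusive lock only when the
 * extents still have to be pulled in from disk:
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	...read the in-core extent records...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */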

/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates which of the inode's locks
 *	 are to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(xfs_inode_t	*ip,
	  uint		lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		mrupdate(&ip->i_iolock);
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		mraccess(&ip->i_iolock);
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		mrupdate(&ip->i_lock);
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		mraccess(&ip->i_lock);
	}
	xfs_ilock_trace(ip, 1, lock_flags, (inst_t *)__return_address);
}
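
/*
 * Example (hypothetical caller): taking both locks with one call keeps
 * the required IO-lock-before-inode-lock ordering; the same flags must
 * be passed back to xfs_iunlock():
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	...modify the inode...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */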

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates which of the inode's locks
 *	 are to be locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(xfs_inode_t	*ip,
		 uint		lock_flags)
{
	int	iolocked;
	int	ilocked;

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~XFS_LOCK_MASK) == 0);

	iolocked = 0;
	if (lock_flags & XFS_IOLOCK_EXCL) {
		iolocked = mrtryupdate(&ip->i_iolock);
		if (!iolocked) {
			return 0;
		}
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		iolocked = mrtryaccess(&ip->i_iolock);
		if (!iolocked) {
			return 0;
		}
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		ilocked = mrtryupdate(&ip->i_lock);
		if (!ilocked) {
			if (iolocked) {
				mrunlock(&ip->i_iolock);
			}
			return 0;
		}
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		ilocked = mrtryaccess(&ip->i_lock);
		if (!ilocked) {
			if (iolocked) {
				mrunlock(&ip->i_iolock);
			}
			return 0;
		}
	}
	xfs_ilock_trace(ip, 2, lock_flags, (inst_t *)__return_address);
	return 1;
}
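
/*
 * Example (hypothetical caller): the nowait variant suits callers that
 * already hold other resources and would rather back off than risk a
 * deadlock by blocking:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
 *		...drop conflicting resources first...
 *		xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	}
 */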

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates which of the inode's locks
 *	 are to be unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(xfs_inode_t	*ip,
	    uint	lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
		ASSERT(!(lock_flags & XFS_IOLOCK_SHARED) ||
		       (ismrlocked(&ip->i_iolock, MR_ACCESS)));
		ASSERT(!(lock_flags & XFS_IOLOCK_EXCL) ||
		       (ismrlocked(&ip->i_iolock, MR_UPDATE)));
		mrunlock(&ip->i_iolock);
	}

	if (lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) {
		ASSERT(!(lock_flags & XFS_ILOCK_SHARED) ||
		       (ismrlocked(&ip->i_lock, MR_ACCESS)));
		ASSERT(!(lock_flags & XFS_ILOCK_EXCL) ||
		       (ismrlocked(&ip->i_lock, MR_UPDATE)));
		mrunlock(&ip->i_lock);

		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		if (!(lock_flags & XFS_IUNLOCK_NONOTIFY) &&
		     ip->i_itemp != NULL) {
			xfs_trans_unlocked_item(ip->i_mount,
						(xfs_log_item_t *)(ip->i_itemp));
		}
	}
	xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(xfs_inode_t	*ip,
		 uint		lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL) {
		ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
		mrdemote(&ip->i_lock);
	}
	if (lock_flags & XFS_IOLOCK_EXCL) {
		ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
		mrdemote(&ip->i_iolock);
	}
}
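
/*
 * Example (hypothetical caller): demotion lets a writer finish its
 * update and keep reading without ever dropping the lock:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	...modify the inode...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	...continue reading under the now-shared lock...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */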

/*
 * The following three routines simply manage the i_flock
 * semaphore embedded in the inode.  This semaphore synchronizes
 * processes attempting to flush the in-core inode back to disk.
 */
void
xfs_iflock(xfs_inode_t *ip)
{
	psema(&(ip->i_flock), PINOD|PLTWAIT);
}

int
xfs_iflock_nowait(xfs_inode_t *ip)
{
	return (cpsema(&(ip->i_flock)));
}

void
xfs_ifunlock(xfs_inode_t *ip)
{
	ASSERT(valusema(&(ip->i_flock)) <= 0);
	vsema(&(ip->i_flock));
}
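
/*
 * Example (hypothetical caller; xfs_iflush() and XFS_IFLUSH_ASYNC live
 * outside this file): the flush lock is taken around writing the in-core
 * inode back to its buffer and is released from I/O completion, so a
 * non-blocking attempt is the common pattern:
 *
 *	xfs_ilock(ip, XFS_ILOCK_SHARED);
 *	if (xfs_iflock_nowait(ip))
 *		error = xfs_iflush(ip, XFS_IFLUSH_ASYNC);
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */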