/*
 * Copyright (c) 2000,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#ifdef XFS_TRANS_DEBUG
STATIC void
xfs_trans_inode_broot_debug(
        xfs_inode_t     *ip);
#else
#define xfs_trans_inode_broot_debug(ip)
#endif
/*
 * Get and lock the inode for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * For an inode to be locked in a transaction, the inode lock, as
 * opposed to the io lock, must be taken exclusively.  This ensures
 * that the inode can be involved in only 1 transaction at a time.
 * Lock recursion is handled on the io lock, but only for lock modes
 * of equal or lesser strength.  That is, you can recur on the io lock
 * held EXCL with a SHARED request but not vice versa.  Also, if
 * the inode is already a part of the transaction then you cannot
 * go from not holding the io lock to having it EXCL or SHARED.
 *
 * Use the inode cache routine xfs_inode_incore() to find the inode
 * if it is already owned by this transaction.
 *
 * If we don't already own the inode, use xfs_iget() to get it.
 * Since the inode log item structure is embedded in the incore
 * inode structure and is initialized when the inode is brought
 * into memory, there is nothing to do with it here.
 *
 * If the given transaction pointer is NULL, just call xfs_iget().
 * This simplifies code which must handle both cases.
 */
int
xfs_trans_iget(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_ino_t       ino,
        uint            flags,
        uint            lock_flags,
        xfs_inode_t     **ipp)
{
        int                     error;
        xfs_inode_t             *ip;
        xfs_inode_log_item_t    *iip;

        /*
         * If the transaction pointer is NULL, just call the normal
         * xfs_iget().
         */
        if (tp == NULL)
                return xfs_iget(mp, NULL, ino, flags, lock_flags, ipp, 0);

        /*
         * If we find the inode in core with this transaction
         * pointer in its i_transp field, then we know we already
         * have it locked.  In this case we just increment the lock
         * recursion count and return the inode to the caller.
         * Assert that the inode is already locked in the mode requested
         * by the caller.  We cannot do lock promotions yet, so
         * die if someone gets this wrong.
         */
        if ((ip = xfs_inode_incore(tp->t_mountp, ino, tp)) != NULL) {
                /*
                 * Make sure that the inode lock is held EXCL and
                 * that the io lock is never upgraded when the inode
                 * is already a part of the transaction.
                 */
                ASSERT(ip->i_itemp != NULL);
                ASSERT(lock_flags & XFS_ILOCK_EXCL);
                ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
                ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
                       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
                ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
                       (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_EXCL));
                ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
                       xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED));
                ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
                       (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_ANY));

                if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
                        ip->i_itemp->ili_iolock_recur++;
                }
                if (lock_flags & XFS_ILOCK_EXCL) {
                        ip->i_itemp->ili_ilock_recur++;
                }
                *ipp = ip;
                return 0;
        }

        ASSERT(lock_flags & XFS_ILOCK_EXCL);
        error = xfs_iget(tp->t_mountp, tp, ino, flags, lock_flags, &ip, 0);
        if (error) {
                return error;
        }
        ASSERT(ip != NULL);

        /*
         * Get a log_item_desc to point at the new item.
         */
        if (ip->i_itemp == NULL)
                xfs_inode_item_init(ip, mp);
        iip = ip->i_itemp;
        (void) xfs_trans_add_item(tp, (xfs_log_item_t *)(iip));

        xfs_trans_inode_broot_debug(ip);

        /*
         * If the IO lock has been acquired, mark that in
         * the inode log item so we'll know to unlock it
         * when the transaction commits.
         */
        ASSERT(iip->ili_flags == 0);
        if (lock_flags & XFS_IOLOCK_EXCL) {
                iip->ili_flags |= XFS_ILI_IOLOCKED_EXCL;
        } else if (lock_flags & XFS_IOLOCK_SHARED) {
                iip->ili_flags |= XFS_ILI_IOLOCKED_SHARED;
        }

        /*
         * Initialize i_transp so we can find it with xfs_inode_incore()
         * above.
         */
        ip->i_transp = tp;

        *ipp = ip;
        return 0;
}
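
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * a caller that already runs inside a transaction would typically pull an
 * inode into that transaction like this, letting the transaction commit
 * handle the eventual unlock.  The surrounding error handling is only
 * indicative.
 *
 *      xfs_inode_t     *ip;
 *      int             error;
 *
 *      error = xfs_trans_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
 *      if (error)
 *              return error;
 *      ... modify the incore inode ...
 *      xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *
 * With tp == NULL the call degenerates to a plain xfs_iget() and the
 * caller is responsible for unlocking the inode itself.
 */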
/*
 * Add the locked inode to the transaction.
 * The inode must be locked, and it cannot be associated with any
 * transaction.  The caller must specify the locks already held
 * on the inode.
 */
void
xfs_trans_ijoin(
        xfs_trans_t     *tp,
        xfs_inode_t     *ip,
        uint            lock_flags)
{
        xfs_inode_log_item_t    *iip;

        ASSERT(ip->i_transp == NULL);
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(lock_flags & XFS_ILOCK_EXCL);
        if (ip->i_itemp == NULL)
                xfs_inode_item_init(ip, ip->i_mount);
        iip = ip->i_itemp;
        ASSERT(iip->ili_flags == 0);
        ASSERT(iip->ili_ilock_recur == 0);
        ASSERT(iip->ili_iolock_recur == 0);

        /*
         * Get a log_item_desc to point at the new item.
         */
        (void) xfs_trans_add_item(tp, (xfs_log_item_t*)(iip));

        xfs_trans_inode_broot_debug(ip);

        /*
         * If the IO lock is already held, mark that in the inode log item.
         */
        if (lock_flags & XFS_IOLOCK_EXCL) {
                iip->ili_flags |= XFS_ILI_IOLOCKED_EXCL;
        } else if (lock_flags & XFS_IOLOCK_SHARED) {
                iip->ili_flags |= XFS_ILI_IOLOCKED_SHARED;
        }

        /*
         * Initialize i_transp so we can find it with xfs_inode_incore()
         * in xfs_trans_iget() above.
         */
        ip->i_transp = tp;
}
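
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * callers that already hold the inode lock join the inode explicitly
 * instead of going through xfs_trans_iget():
 *
 *      xfs_ilock(ip, XFS_ILOCK_EXCL);
 *      xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *
 * After this the inode belongs to tp and, unless xfs_trans_ihold() is
 * also called, it is unlocked for the caller when tp commits or cancels.
 */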
/*
 * Mark the inode as not needing to be unlocked when the inode item's
 * IOP_UNLOCK() routine is called.  The inode must already be locked
 * and associated with the given transaction.
 */
/*ARGSUSED*/
void
xfs_trans_ihold(
        xfs_trans_t     *tp,
        xfs_inode_t     *ip)
{
        ASSERT(ip->i_transp == tp);
        ASSERT(ip->i_itemp != NULL);
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        ip->i_itemp->ili_flags |= XFS_ILI_HOLD;
}
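
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * the usual ijoin/ihold pairing when the caller wants to keep the inode
 * locked across the commit and drop the lock itself afterwards:
 *
 *      xfs_ilock(ip, XFS_ILOCK_EXCL);
 *      xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
 *      xfs_trans_ihold(tp, ip);
 *      ... dirty and log the inode ...
 *      error = xfs_trans_commit(tp, 0);
 *      xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *
 * Without the xfs_trans_ihold() call, the inode item's IOP_UNLOCK()
 * routine would release the inode lock at commit time.
 */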
/*
 * This is called to mark the fields indicated in fieldmask as needing
 * to be logged when the transaction is committed.  The inode must
 * already be associated with the given transaction.
 *
 * The values for fieldmask are defined in xfs_inode_item.h.  We always
 * log all of the core inode if any of it has changed, and we always log
 * all of the inline data/extents/b-tree root if any of them has changed.
 */
void
xfs_trans_log_inode(
        xfs_trans_t     *tp,
        xfs_inode_t     *ip,
        uint            flags)
{
        xfs_log_item_desc_t     *lidp;

        ASSERT(ip->i_transp == tp);
        ASSERT(ip->i_itemp != NULL);
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)(ip->i_itemp));
        ASSERT(lidp != NULL);

        tp->t_flags |= XFS_TRANS_DIRTY;
        lidp->lid_flags |= XFS_LID_DIRTY;

        /*
         * Always OR in the bits from the ili_last_fields field.
         * This is to coordinate with the xfs_iflush() and xfs_iflush_done()
         * routines in the eventual clearing of the ilf_fields bits.
         * See the big comment in xfs_iflush() for an explanation of
         * this coordination mechanism.
         */
        flags |= ip->i_itemp->ili_last_fields;
        ip->i_itemp->ili_format.ilf_fields |= flags;
}
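
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * after changing core fields of an inode that has been joined to tp, the
 * change is made recoverable by logging the inode core:
 *
 *      ip->i_d.di_mode = mode;
 *      xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *
 * XFS_ILOG_CORE and the other ilf_fields flag values are defined in
 * xfs_inode_item.h; logging any part of the core always logs all of it.
 */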
#ifdef XFS_TRANS_DEBUG
/*
 * Keep track of the state of the inode btree root to make sure we
 * log it properly.
 */
STATIC void
xfs_trans_inode_broot_debug(
        xfs_inode_t     *ip)
{
        xfs_inode_log_item_t    *iip;

        ASSERT(ip->i_itemp != NULL);
        iip = ip->i_itemp;
        if (iip->ili_root_size != 0) {
                ASSERT(iip->ili_orig_root != NULL);
                kmem_free(iip->ili_orig_root, iip->ili_root_size);
                iip->ili_root_size = 0;
                iip->ili_orig_root = NULL;
        }
        if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
                ASSERT((ip->i_df.if_broot != NULL) &&
                       (ip->i_df.if_broot_bytes > 0));
                iip->ili_root_size = ip->i_df.if_broot_bytes;
                iip->ili_orig_root =
                        (char*)kmem_alloc(iip->ili_root_size, KM_SLEEP);
                memcpy(iip->ili_orig_root, (char*)(ip->i_df.if_broot),
                      iip->ili_root_size);
        }
}
#endif