[XFS] kill bhv_vnode_t
fs/xfs/xfs_inode.c
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_imap.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_rw.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_dir2_trace.h"
#include "xfs_quota.h"
#include "xfs_acl.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"

kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define XFS_ITRUNC_MAX_EXTENTS  2
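
/*
 * Capping the number of extents freed per transaction keeps each truncate
 * transaction within its fixed log reservation; xfs_itruncate_finish()
 * below simply rolls to a fresh transaction and calls xfs_bunmapi() again
 * until the whole range has been unmapped.
 */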

STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);

#ifdef DEBUG
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
STATIC void
xfs_validate_extents(
        xfs_ifork_t             *ifp,
        int                     nrecs,
        xfs_exntfmt_t           fmt)
{
        xfs_bmbt_irec_t         irec;
        xfs_bmbt_rec_host_t     rec;
        int                     i;

        for (i = 0; i < nrecs; i++) {
                xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
                rec.l0 = get_unaligned(&ep->l0);
                rec.l1 = get_unaligned(&ep->l1);
                xfs_bmbt_get_all(&rec, &irec);
                if (fmt == XFS_EXTFMT_NOSTATE)
                        ASSERT(irec.br_state == XFS_EXT_NORM);
        }
}
#else /* DEBUG */
#define xfs_validate_extents(ifp, nrecs, fmt)
#endif /* DEBUG */

/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
#if defined(DEBUG)
void
xfs_inobp_check(
        xfs_mount_t     *mp,
        xfs_buf_t       *bp)
{
        int             i;
        int             j;
        xfs_dinode_t    *dip;

        j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

        for (i = 0; i < j; i++) {
                dip = (xfs_dinode_t *)xfs_buf_offset(bp,
                                        i * mp->m_sb.sb_inodesize);
                if (!dip->di_next_unlinked)  {
                        xfs_fs_cmn_err(CE_ALERT, mp,
                                "Detected a bogus zero next_unlinked field in incore inode buffer 0x%p.  About to pop an ASSERT.",
                                bp);
                        ASSERT(dip->di_next_unlinked);
                }
        }
}
#endif

/*
 * Find the buffer associated with the given inode map.
 * We do basic validation checks on the buffer once it has been
 * retrieved from disk.
 */
STATIC int
xfs_imap_to_bp(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_imap_t      *imap,
        xfs_buf_t       **bpp,
        uint            buf_flags,
        uint            imap_flags)
{
        int             error;
        int             i;
        int             ni;
        xfs_buf_t       *bp;

        error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
                                   (int)imap->im_len, buf_flags, &bp);
        if (error) {
                if (error != EAGAIN) {
                        cmn_err(CE_WARN,
                                "xfs_imap_to_bp: xfs_trans_read_buf() returned "
                                "an error %d on %s.  Returning error.",
                                error, mp->m_fsname);
                } else {
                        ASSERT(buf_flags & XFS_BUF_TRYLOCK);
                }
                return error;
        }

        /*
         * Validate the magic number and version of every inode in the buffer
         * (if DEBUG kernel) or the first inode in the buffer, otherwise.
         */
#ifdef DEBUG
        ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
#else   /* usual case */
        ni = 1;
#endif

        for (i = 0; i < ni; i++) {
                int             di_ok;
                xfs_dinode_t    *dip;

                dip = (xfs_dinode_t *)xfs_buf_offset(bp,
                                        (i << mp->m_sb.sb_inodelog));
                di_ok = be16_to_cpu(dip->di_core.di_magic) == XFS_DINODE_MAGIC &&
                            XFS_DINODE_GOOD_VERSION(dip->di_core.di_version);
                if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
                                                XFS_ERRTAG_ITOBP_INOTOBP,
                                                XFS_RANDOM_ITOBP_INOTOBP))) {
                        if (imap_flags & XFS_IMAP_BULKSTAT) {
                                xfs_trans_brelse(tp, bp);
                                return XFS_ERROR(EINVAL);
                        }
                        XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
                                                XFS_ERRLEVEL_HIGH, mp, dip);
#ifdef DEBUG
                        cmn_err(CE_PANIC,
                                        "Device %s - bad inode magic/vsn "
                                        "daddr %lld #%d (magic=%x)",
                                XFS_BUFTARG_NAME(mp->m_ddev_targp),
                                (unsigned long long)imap->im_blkno, i,
                                be16_to_cpu(dip->di_core.di_magic));
#endif
                        xfs_trans_brelse(tp, bp);
                        return XFS_ERROR(EFSCORRUPTED);
                }
        }

        xfs_inobp_check(mp, bp);

        /*
         * Mark the buffer as an inode buffer now that it looks good
         */
        XFS_BUF_SET_VTYPE(bp, B_FS_INO);

        *bpp = bp;
        return 0;
}

/*
 * This routine is called to map an inode number within a file
 * system to the buffer containing the on-disk version of the
 * inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dip parameter
 * it returns a pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * Use xfs_imap() to determine the size and location of the
 * buffer to read from disk.
 */
STATIC int
xfs_inotobp(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_ino_t       ino,
        xfs_dinode_t    **dipp,
        xfs_buf_t       **bpp,
        int             *offset)
{
        xfs_imap_t      imap;
        xfs_buf_t       *bp;
        int             error;

        imap.im_blkno = 0;
        error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP);
        if (error)
                return error;

        error = xfs_imap_to_bp(mp, tp, &imap, &bp, XFS_BUF_LOCK, 0);
        if (error)
                return error;

        *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
        *bpp = bp;
        *offset = imap.im_boffset;
        return 0;
}
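
/*
 * Note the split of responsibilities: xfs_inotobp() starts from a raw
 * inode number and therefore always pays for an xfs_imap() lookup, while
 * xfs_itobp() below starts from an in-core inode and can reuse the
 * mapping cached in it on every call after the first.
 */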

/*
 * This routine is called to map an inode to the buffer containing
 * the on-disk version of the inode.  It returns a pointer to the
 * buffer containing the on-disk inode in the bpp parameter, and in
 * the dip parameter it returns a pointer to the on-disk inode within
 * that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * If the inode is new and has not yet been initialized, use xfs_imap()
 * to determine the size and location of the buffer to read from disk.
 * If the inode has already been mapped to its buffer and read in once,
 * then use the mapping information stored in the inode rather than
 * calling xfs_imap().  This allows us to avoid the overhead of looking
 * at the inode btree for small block file systems (see xfs_dilocate()).
 * We can tell whether the inode has been mapped in before by comparing
 * its disk block address to 0.  Only uninitialized inodes will have
 * 0 for the disk block address.
 */
int
xfs_itobp(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_inode_t     *ip,
        xfs_dinode_t    **dipp,
        xfs_buf_t       **bpp,
        xfs_daddr_t     bno,
        uint            imap_flags,
        uint            buf_flags)
{
        xfs_imap_t      imap;
        xfs_buf_t       *bp;
        int             error;

        if (ip->i_blkno == (xfs_daddr_t)0) {
                imap.im_blkno = bno;
                error = xfs_imap(mp, tp, ip->i_ino, &imap,
                                        XFS_IMAP_LOOKUP | imap_flags);
                if (error)
                        return error;

                /*
                 * Fill in the fields in the inode that will be used to
                 * map the inode to its buffer from now on.
                 */
                ip->i_blkno = imap.im_blkno;
                ip->i_len = imap.im_len;
                ip->i_boffset = imap.im_boffset;
        } else {
                /*
                 * We've already mapped the inode once, so just use the
                 * mapping that we saved the first time.
                 */
                imap.im_blkno = ip->i_blkno;
                imap.im_len = ip->i_len;
                imap.im_boffset = ip->i_boffset;
        }
        ASSERT(bno == 0 || bno == imap.im_blkno);

        error = xfs_imap_to_bp(mp, tp, &imap, &bp, buf_flags, imap_flags);
        if (error)
                return error;

        if (!bp) {
                ASSERT(buf_flags & XFS_BUF_TRYLOCK);
                ASSERT(tp == NULL);
                *bpp = NULL;
                return EAGAIN;
        }

        *dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
        *bpp = bp;
        return 0;
}
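
/*
 * Typical use (an illustrative sketch; see xfs_iread() below for the
 * real thing): lock and read the on-disk inode, inspect it, then release
 * the buffer through the transaction so a dirty buffer is not dropped
 * early:
 *
 *      error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, 0, XFS_BUF_LOCK);
 *      if (error)
 *              return error;
 *      ... look at dip->di_core ...
 *      xfs_trans_brelse(tp, bp);
 */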

/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means set if_rdev to the proper value.  For files, directories,
 * and symlinks this means to bring in the in-line data or extent
 * pointers.  For a file in B-tree format, only the root is immediately
 * brought in-core.  The rest will be in-lined in if_extents when it
 * is first referenced (see xfs_iread_extents()).
 */
STATIC int
xfs_iformat(
        xfs_inode_t             *ip,
        xfs_dinode_t            *dip)
{
        xfs_attr_shortform_t    *atp;
        int                     size;
        int                     error;
        xfs_fsize_t             di_size;
        ip->i_df.if_ext_max =
                XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
        error = 0;

        if (unlikely(be32_to_cpu(dip->di_core.di_nextents) +
                     be16_to_cpu(dip->di_core.di_anextents) >
                     be64_to_cpu(dip->di_core.di_nblocks))) {
                xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
                        "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
                        (unsigned long long)ip->i_ino,
                        (int)(be32_to_cpu(dip->di_core.di_nextents) +
                              be16_to_cpu(dip->di_core.di_anextents)),
                        (unsigned long long)
                                be64_to_cpu(dip->di_core.di_nblocks));
                XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
                                     ip->i_mount, dip);
                return XFS_ERROR(EFSCORRUPTED);
        }

        if (unlikely(dip->di_core.di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
                xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
                        "corrupt dinode %Lu, forkoff = 0x%x.",
                        (unsigned long long)ip->i_ino,
                        dip->di_core.di_forkoff);
                XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
                                     ip->i_mount, dip);
                return XFS_ERROR(EFSCORRUPTED);
        }

        switch (ip->i_d.di_mode & S_IFMT) {
        case S_IFIFO:
        case S_IFCHR:
        case S_IFBLK:
        case S_IFSOCK:
                if (unlikely(dip->di_core.di_format != XFS_DINODE_FMT_DEV)) {
                        XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
                                              ip->i_mount, dip);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                ip->i_d.di_size = 0;
                ip->i_size = 0;
                ip->i_df.if_u2.if_rdev = be32_to_cpu(dip->di_u.di_dev);
                break;

        case S_IFREG:
        case S_IFLNK:
        case S_IFDIR:
                switch (dip->di_core.di_format) {
                case XFS_DINODE_FMT_LOCAL:
                        /*
                         * no local regular files yet
                         */
                        if (unlikely((be16_to_cpu(dip->di_core.di_mode) & S_IFMT) == S_IFREG)) {
                                xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
                                        "corrupt inode %Lu "
                                        "(local format for regular file).",
                                        (unsigned long long) ip->i_ino);
                                XFS_CORRUPTION_ERROR("xfs_iformat(4)",
                                                     XFS_ERRLEVEL_LOW,
                                                     ip->i_mount, dip);
                                return XFS_ERROR(EFSCORRUPTED);
                        }

                        di_size = be64_to_cpu(dip->di_core.di_size);
                        if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
                                xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
                                        "corrupt inode %Lu "
                                        "(bad size %Ld for local inode).",
                                        (unsigned long long) ip->i_ino,
                                        (long long) di_size);
                                XFS_CORRUPTION_ERROR("xfs_iformat(5)",
                                                     XFS_ERRLEVEL_LOW,
                                                     ip->i_mount, dip);
                                return XFS_ERROR(EFSCORRUPTED);
                        }

                        size = (int)di_size;
                        error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
                        break;
                case XFS_DINODE_FMT_EXTENTS:
                        error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
                        break;
                case XFS_DINODE_FMT_BTREE:
                        error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
                        break;
                default:
                        XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
                                         ip->i_mount);
                        return XFS_ERROR(EFSCORRUPTED);
                }
                break;

        default:
                XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
                return XFS_ERROR(EFSCORRUPTED);
        }
        if (error) {
                return error;
        }
        if (!XFS_DFORK_Q(dip))
                return 0;
        ASSERT(ip->i_afp == NULL);
        ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
        ip->i_afp->if_ext_max =
                XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
        switch (dip->di_core.di_aformat) {
        case XFS_DINODE_FMT_LOCAL:
                atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
                size = be16_to_cpu(atp->hdr.totsize);
                error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
                break;
        case XFS_DINODE_FMT_EXTENTS:
                error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
                break;
        case XFS_DINODE_FMT_BTREE:
                error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
                break;
        default:
                error = XFS_ERROR(EFSCORRUPTED);
                break;
        }
        if (error) {
                kmem_zone_free(xfs_ifork_zone, ip->i_afp);
                ip->i_afp = NULL;
                xfs_idestroy_fork(ip, XFS_DATA_FORK);
        }
        return error;
}

/*
 * The file is in-lined in the on-disk inode.
 * If it fits into if_inline_data, then copy
 * it there, otherwise allocate a buffer for it
 * and copy the data there.  Either way, set
 * if_data to point at the data.
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in i_real_bytes.
 */
STATIC int
xfs_iformat_local(
        xfs_inode_t     *ip,
        xfs_dinode_t    *dip,
        int             whichfork,
        int             size)
{
        xfs_ifork_t     *ifp;
        int             real_size;

        /*
         * If the size is unreasonable, then something
         * is wrong and we just bail out rather than crash in
         * kmem_alloc() or memcpy() below.
         */
        if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
                xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
                        "corrupt inode %Lu "
                        "(bad size %d for local fork, size = %d).",
                        (unsigned long long) ip->i_ino, size,
                        XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
                XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
                                     ip->i_mount, dip);
                return XFS_ERROR(EFSCORRUPTED);
        }
        ifp = XFS_IFORK_PTR(ip, whichfork);
        real_size = 0;
        if (size == 0)
                ifp->if_u1.if_data = NULL;
        else if (size <= sizeof(ifp->if_u2.if_inline_data))
                ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
        else {
                real_size = roundup(size, 4);
                ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
        }
        ifp->if_bytes = size;
        ifp->if_real_bytes = real_size;
        if (size)
                memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
        ifp->if_flags &= ~XFS_IFEXTENTS;
        ifp->if_flags |= XFS_IFINLINE;
        return 0;
}

/*
 * The file consists of a set of extents all
 * of which fit into the on-disk inode.
 * If there are few enough extents to fit into
 * the if_inline_ext, then copy them there.
 * Otherwise allocate a buffer for them and copy
 * them into it.  Either way, set if_extents
 * to point at the extents.
 */
STATIC int
xfs_iformat_extents(
        xfs_inode_t     *ip,
        xfs_dinode_t    *dip,
        int             whichfork)
{
        xfs_bmbt_rec_t  *dp;
        xfs_ifork_t     *ifp;
        int             nex;
        int             size;
        int             i;

        ifp = XFS_IFORK_PTR(ip, whichfork);
        nex = XFS_DFORK_NEXTENTS(dip, whichfork);
        size = nex * (uint)sizeof(xfs_bmbt_rec_t);

        /*
         * If the number of extents is unreasonable, then something
         * is wrong and we just bail out rather than crash in
         * kmem_alloc() or memcpy() below.
         */
        if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
                xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
                        "corrupt inode %Lu ((a)extents = %d).",
                        (unsigned long long) ip->i_ino, nex);
                XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
                                     ip->i_mount, dip);
                return XFS_ERROR(EFSCORRUPTED);
        }

        ifp->if_real_bytes = 0;
        if (nex == 0)
                ifp->if_u1.if_extents = NULL;
        else if (nex <= XFS_INLINE_EXTS)
                ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
        else
                xfs_iext_add(ifp, 0, nex);

        ifp->if_bytes = size;
        if (size) {
                dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
                xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
                for (i = 0; i < nex; i++, dp++) {
                        xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
                        ep->l0 = be64_to_cpu(get_unaligned(&dp->l0));
                        ep->l1 = be64_to_cpu(get_unaligned(&dp->l1));
                }
                XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
                if (whichfork != XFS_DATA_FORK ||
                        XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
                                if (unlikely(xfs_check_nostate_extents(
                                    ifp, 0, nex))) {
                                        XFS_ERROR_REPORT("xfs_iformat_extents(2)",
                                                         XFS_ERRLEVEL_LOW,
                                                         ip->i_mount);
                                        return XFS_ERROR(EFSCORRUPTED);
                                }
        }
        ifp->if_flags |= XFS_IFEXTENTS;
        return 0;
}

/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
STATIC int
xfs_iformat_btree(
        xfs_inode_t             *ip,
        xfs_dinode_t            *dip,
        int                     whichfork)
{
        xfs_bmdr_block_t        *dfp;
        xfs_ifork_t             *ifp;
        /* REFERENCED */
        int                     nrecs;
        int                     size;

        ifp = XFS_IFORK_PTR(ip, whichfork);
        dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
        size = XFS_BMAP_BROOT_SPACE(dfp);
        nrecs = XFS_BMAP_BROOT_NUMRECS(dfp);

        /*
         * blow out if -- fork has fewer extents than can fit in
         * fork (fork shouldn't be a btree format), root btree
         * block has more records than can fit into the fork,
         * or the number of extents is greater than the number of
         * blocks.
         */
        if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
            || XFS_BMDR_SPACE_CALC(nrecs) >
                        XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
            || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
                xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
                        "corrupt inode %Lu (btree).",
                        (unsigned long long) ip->i_ino);
                XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
                                 ip->i_mount);
                return XFS_ERROR(EFSCORRUPTED);
        }

        ifp->if_broot_bytes = size;
        ifp->if_broot = kmem_alloc(size, KM_SLEEP);
        ASSERT(ifp->if_broot != NULL);
        /*
         * Copy and convert from the on-disk structure
         * to the in-memory structure.
         */
        xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
                ifp->if_broot, size);
        ifp->if_flags &= ~XFS_IFEXTENTS;
        ifp->if_flags |= XFS_IFBROOT;

        return 0;
}

void
xfs_dinode_from_disk(
        xfs_icdinode_t          *to,
        xfs_dinode_core_t       *from)
{
        to->di_magic = be16_to_cpu(from->di_magic);
        to->di_mode = be16_to_cpu(from->di_mode);
        to->di_version = from->di_version;
        to->di_format = from->di_format;
        to->di_onlink = be16_to_cpu(from->di_onlink);
        to->di_uid = be32_to_cpu(from->di_uid);
        to->di_gid = be32_to_cpu(from->di_gid);
        to->di_nlink = be32_to_cpu(from->di_nlink);
        to->di_projid = be16_to_cpu(from->di_projid);
        memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
        to->di_flushiter = be16_to_cpu(from->di_flushiter);
        to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
        to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
        to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
        to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
        to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
        to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
        to->di_size = be64_to_cpu(from->di_size);
        to->di_nblocks = be64_to_cpu(from->di_nblocks);
        to->di_extsize = be32_to_cpu(from->di_extsize);
        to->di_nextents = be32_to_cpu(from->di_nextents);
        to->di_anextents = be16_to_cpu(from->di_anextents);
        to->di_forkoff = from->di_forkoff;
        to->di_aformat = from->di_aformat;
        to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
        to->di_dmstate = be16_to_cpu(from->di_dmstate);
        to->di_flags = be16_to_cpu(from->di_flags);
        to->di_gen = be32_to_cpu(from->di_gen);
}

void
xfs_dinode_to_disk(
        xfs_dinode_core_t       *to,
        xfs_icdinode_t          *from)
{
        to->di_magic = cpu_to_be16(from->di_magic);
        to->di_mode = cpu_to_be16(from->di_mode);
        to->di_version = from->di_version;
        to->di_format = from->di_format;
        to->di_onlink = cpu_to_be16(from->di_onlink);
        to->di_uid = cpu_to_be32(from->di_uid);
        to->di_gid = cpu_to_be32(from->di_gid);
        to->di_nlink = cpu_to_be32(from->di_nlink);
        to->di_projid = cpu_to_be16(from->di_projid);
        memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
        to->di_flushiter = cpu_to_be16(from->di_flushiter);
        to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
        to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
        to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
        to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
        to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
        to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
        to->di_size = cpu_to_be64(from->di_size);
        to->di_nblocks = cpu_to_be64(from->di_nblocks);
        to->di_extsize = cpu_to_be32(from->di_extsize);
        to->di_nextents = cpu_to_be32(from->di_nextents);
        to->di_anextents = cpu_to_be16(from->di_anextents);
        to->di_forkoff = from->di_forkoff;
        to->di_aformat = from->di_aformat;
        to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
        to->di_dmstate = cpu_to_be16(from->di_dmstate);
        to->di_flags = cpu_to_be16(from->di_flags);
        to->di_gen = cpu_to_be32(from->di_gen);
}
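
/*
 * The two helpers above are exact field-by-field inverses:
 * xfs_dinode_from_disk() runs when an inode is read in (see xfs_iread()
 * below), xfs_dinode_to_disk() when it is flushed back out, so a round
 * trip through both leaves every field, including the endian-converted
 * timestamps, unchanged.
 */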

STATIC uint
_xfs_dic2xflags(
        __uint16_t              di_flags)
{
        uint                    flags = 0;

        if (di_flags & XFS_DIFLAG_ANY) {
                if (di_flags & XFS_DIFLAG_REALTIME)
                        flags |= XFS_XFLAG_REALTIME;
                if (di_flags & XFS_DIFLAG_PREALLOC)
                        flags |= XFS_XFLAG_PREALLOC;
                if (di_flags & XFS_DIFLAG_IMMUTABLE)
                        flags |= XFS_XFLAG_IMMUTABLE;
                if (di_flags & XFS_DIFLAG_APPEND)
                        flags |= XFS_XFLAG_APPEND;
                if (di_flags & XFS_DIFLAG_SYNC)
                        flags |= XFS_XFLAG_SYNC;
                if (di_flags & XFS_DIFLAG_NOATIME)
                        flags |= XFS_XFLAG_NOATIME;
                if (di_flags & XFS_DIFLAG_NODUMP)
                        flags |= XFS_XFLAG_NODUMP;
                if (di_flags & XFS_DIFLAG_RTINHERIT)
                        flags |= XFS_XFLAG_RTINHERIT;
                if (di_flags & XFS_DIFLAG_PROJINHERIT)
                        flags |= XFS_XFLAG_PROJINHERIT;
                if (di_flags & XFS_DIFLAG_NOSYMLINKS)
                        flags |= XFS_XFLAG_NOSYMLINKS;
                if (di_flags & XFS_DIFLAG_EXTSIZE)
                        flags |= XFS_XFLAG_EXTSIZE;
                if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
                        flags |= XFS_XFLAG_EXTSZINHERIT;
                if (di_flags & XFS_DIFLAG_NODEFRAG)
                        flags |= XFS_XFLAG_NODEFRAG;
                if (di_flags & XFS_DIFLAG_FILESTREAM)
                        flags |= XFS_XFLAG_FILESTREAM;
        }

        return flags;
}

uint
xfs_ip2xflags(
        xfs_inode_t             *ip)
{
        xfs_icdinode_t          *dic = &ip->i_d;

        return _xfs_dic2xflags(dic->di_flags) |
                                (XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}

uint
xfs_dic2xflags(
        xfs_dinode_t            *dip)
{
        xfs_dinode_core_t       *dic = &dip->di_core;

        return _xfs_dic2xflags(be16_to_cpu(dic->di_flags)) |
                                (XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
}

/*
 * Given a mount structure and an inode number, return a pointer
 * to a newly allocated in-core inode corresponding to the given
 * inode number.
 *
 * Initialize the inode's attributes and extent pointers if it
 * already has them (it will not if the inode has no links).
 */
int
xfs_iread(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_ino_t       ino,
        xfs_inode_t     **ipp,
        xfs_daddr_t     bno,
        uint            imap_flags)
{
        xfs_buf_t       *bp;
        xfs_dinode_t    *dip;
        xfs_inode_t     *ip;
        int             error;

        ASSERT(xfs_inode_zone != NULL);

        ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
        ip->i_ino = ino;
        ip->i_mount = mp;
        atomic_set(&ip->i_iocount, 0);
        spin_lock_init(&ip->i_flags_lock);

        /*
         * Get pointers to the on-disk inode and the buffer containing it.
         * If the inode number refers to a block outside the file system
         * then xfs_itobp() will return NULL.  In this case we should
         * return NULL as well.  Set i_blkno to 0 so that xfs_itobp() will
         * know that this is a new incore inode.
         */
        error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags, XFS_BUF_LOCK);
        if (error) {
                kmem_zone_free(xfs_inode_zone, ip);
                return error;
        }

        /*
         * Initialize inode's trace buffers.
         * Do this before xfs_iformat in case it adds entries.
         */
#ifdef  XFS_INODE_TRACE
        ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_BMAP_TRACE
        ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_BMBT_TRACE
        ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_RW_TRACE
        ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_ILOCK_TRACE
        ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP);
#endif
#ifdef XFS_DIR2_TRACE
        ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP);
#endif

        /*
         * If we got something that isn't an inode it means someone
         * (nfs or dmi) has a stale handle.
         */
        if (be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC) {
                kmem_zone_free(xfs_inode_zone, ip);
                xfs_trans_brelse(tp, bp);
#ifdef DEBUG
                xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
                                "dip->di_core.di_magic (0x%x) != "
                                "XFS_DINODE_MAGIC (0x%x)",
                                be16_to_cpu(dip->di_core.di_magic),
                                XFS_DINODE_MAGIC);
#endif /* DEBUG */
                return XFS_ERROR(EINVAL);
        }

        /*
         * If the on-disk inode is already linked to a directory
         * entry, copy all of the inode into the in-core inode.
         * xfs_iformat() handles copying in the inode format
         * specific information.
         * Otherwise, just get the truly permanent information.
         */
        if (dip->di_core.di_mode) {
                xfs_dinode_from_disk(&ip->i_d, &dip->di_core);
                error = xfs_iformat(ip, dip);
                if (error)  {
                        kmem_zone_free(xfs_inode_zone, ip);
                        xfs_trans_brelse(tp, bp);
#ifdef DEBUG
                        xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
                                        "xfs_iformat() returned error %d",
                                        error);
#endif /* DEBUG */
                        return error;
                }
        } else {
                ip->i_d.di_magic = be16_to_cpu(dip->di_core.di_magic);
                ip->i_d.di_version = dip->di_core.di_version;
                ip->i_d.di_gen = be32_to_cpu(dip->di_core.di_gen);
                ip->i_d.di_flushiter = be16_to_cpu(dip->di_core.di_flushiter);
                /*
                 * Make sure to pull in the mode here as well in
                 * case the inode is released without being used.
                 * This ensures that xfs_inactive() will see that
                 * the inode is already free and not try to mess
                 * with the uninitialized part of it.
                 */
                ip->i_d.di_mode = 0;
                /*
                 * Initialize the per-fork minima and maxima for a new
                 * inode here.  xfs_iformat will do it for old inodes.
                 */
                ip->i_df.if_ext_max =
                        XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
        }

        INIT_LIST_HEAD(&ip->i_reclaim);

        /*
         * The inode format changed when we moved the link count and
         * made it 32 bits long.  If this is an old format inode,
         * convert it in memory to look like a new one.  If it gets
         * flushed to disk we will convert back before flushing or
         * logging it.  We zero out the new projid field and the old link
         * count field.  We'll handle clearing the pad field (the remains
         * of the old uuid field) when we actually convert the inode to
         * the new format.  We don't change the version number so that we
         * can distinguish this from a real new format inode.
         */
        if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
                ip->i_d.di_nlink = ip->i_d.di_onlink;
                ip->i_d.di_onlink = 0;
                ip->i_d.di_projid = 0;
        }

        ip->i_delayed_blks = 0;
        ip->i_size = ip->i_d.di_size;

        /*
         * Mark the buffer containing the inode as something to keep
         * around for a while.  This helps to keep recently accessed
         * meta-data in-core longer.
         */
        XFS_BUF_SET_REF(bp, XFS_INO_REF);

        /*
         * Use xfs_trans_brelse() to release the buffer containing the
         * on-disk inode, because it was acquired with xfs_trans_read_buf()
         * in xfs_itobp() above.  If tp is NULL, this is just a normal
         * brelse().  If we're within a transaction, then xfs_trans_brelse()
         * will only release the buffer if it is not dirty within the
         * transaction.  It will be OK to release the buffer in this case,
         * because inodes on disk are never destroyed and we will be
         * locking the new in-core inode before putting it in the hash
         * table where other processes can find it.  Thus we don't have
         * to worry about the inode being changed just because we released
         * the buffer.
         */
        xfs_trans_brelse(tp, bp);
        *ipp = ip;
        return 0;
}

/*
 * Read in extents from a btree-format inode.
 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 */
int
xfs_iread_extents(
        xfs_trans_t     *tp,
        xfs_inode_t     *ip,
        int             whichfork)
{
        int             error;
        xfs_ifork_t     *ifp;
        xfs_extnum_t    nextents;
        size_t          size;

        if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
                XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
                                 ip->i_mount);
                return XFS_ERROR(EFSCORRUPTED);
        }
        nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
        size = nextents * sizeof(xfs_bmbt_rec_t);
        ifp = XFS_IFORK_PTR(ip, whichfork);

        /*
         * We know that the size is valid (it's checked in iformat_btree)
         */
        ifp->if_lastex = NULLEXTNUM;
        ifp->if_bytes = ifp->if_real_bytes = 0;
        ifp->if_flags |= XFS_IFEXTENTS;
        xfs_iext_add(ifp, 0, nextents);
        error = xfs_bmap_read_extents(tp, ip, whichfork);
        if (error) {
                xfs_iext_destroy(ifp);
                ifp->if_flags &= ~XFS_IFEXTENTS;
                return error;
        }
        xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
        return 0;
}
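
/*
 * Callers use the XFS_IFEXTENTS flag to defer this read until the
 * extents are actually needed; the usual pattern (a sketch, mirroring
 * what the bmap code does) is:
 *
 *      if (!(ifp->if_flags & XFS_IFEXTENTS)) {
 *              error = xfs_iread_extents(tp, ip, whichfork);
 *              if (error)
 *                      return error;
 *      }
 */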

/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode.  If xfs_dialloc()
 * has a free inode available, call xfs_iget()
 * to obtain the in-core version of the allocated inode.  Finally,
 * fill in the inode and log its initial contents.  In this case,
 * ialloc_context would be set to NULL and call_again set to false.
 *
 * If xfs_dialloc() does not have an available inode,
 * it will replenish its supply by doing an allocation.  Since we can
 * only do one allocation within a transaction without deadlocks, we
 * must commit the current transaction before returning the inode itself.
 * In this case, therefore, we will set call_again to true and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
int
xfs_ialloc(
        xfs_trans_t     *tp,
        xfs_inode_t     *pip,
        mode_t          mode,
        xfs_nlink_t     nlink,
        xfs_dev_t       rdev,
        cred_t          *cr,
        xfs_prid_t      prid,
        int             okalloc,
        xfs_buf_t       **ialloc_context,
        boolean_t       *call_again,
        xfs_inode_t     **ipp)
{
        xfs_ino_t       ino;
        xfs_inode_t     *ip;
        struct inode    *vp;
        uint            flags;
        int             error;

        /*
         * Call the space management code to pick
         * the on-disk inode to be allocated.
         */
        error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
                            ialloc_context, call_again, &ino);
        if (error != 0) {
                return error;
        }
        if (*call_again || ino == NULLFSINO) {
                *ipp = NULL;
                return 0;
        }
        ASSERT(*ialloc_context == NULL);

        /*
         * Get the in-core inode with the lock held exclusively.
         * This is because we're setting fields here we need
         * to prevent others from looking at until we're done.
         */
        error = xfs_trans_iget(tp->t_mountp, tp, ino,
                                XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
        if (error != 0) {
                return error;
        }
        ASSERT(ip != NULL);

        vp = VFS_I(ip);
        ip->i_d.di_mode = (__uint16_t)mode;
        ip->i_d.di_onlink = 0;
        ip->i_d.di_nlink = nlink;
        ASSERT(ip->i_d.di_nlink == nlink);
        ip->i_d.di_uid = current_fsuid(cr);
        ip->i_d.di_gid = current_fsgid(cr);
        ip->i_d.di_projid = prid;
        memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

        /*
         * If the superblock version is up to where we support new format
         * inodes and this is currently an old format inode, then change
         * the inode version number now.  This way we only do the conversion
         * here rather than here and in the flush/logging code.
         */
        if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
            ip->i_d.di_version == XFS_DINODE_VERSION_1) {
                ip->i_d.di_version = XFS_DINODE_VERSION_2;
                /*
                 * We've already zeroed the old link count, the projid field,
                 * and the pad field.
                 */
        }

        /*
         * Project ids won't be stored on disk if we are using a version 1 inode.
         */
        if ((prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1))
                xfs_bump_ino_vers2(tp, ip);

        if (pip && XFS_INHERIT_GID(pip)) {
                ip->i_d.di_gid = pip->i_d.di_gid;
                if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
                        ip->i_d.di_mode |= S_ISGID;
                }
        }

        /*
         * If the group ID of the new file does not match the effective group
         * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
         * (and only if the irix_sgid_inherit compatibility variable is set).
         */
        if ((irix_sgid_inherit) &&
            (ip->i_d.di_mode & S_ISGID) &&
            (!in_group_p((gid_t)ip->i_d.di_gid))) {
                ip->i_d.di_mode &= ~S_ISGID;
        }

        ip->i_d.di_size = 0;
        ip->i_size = 0;
        ip->i_d.di_nextents = 0;
        ASSERT(ip->i_d.di_nblocks == 0);
        xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD);
        /*
         * di_gen will have been taken care of in xfs_iread.
         */
        ip->i_d.di_extsize = 0;
        ip->i_d.di_dmevmask = 0;
        ip->i_d.di_dmstate = 0;
        ip->i_d.di_flags = 0;
        flags = XFS_ILOG_CORE;
        switch (mode & S_IFMT) {
        case S_IFIFO:
        case S_IFCHR:
        case S_IFBLK:
        case S_IFSOCK:
                ip->i_d.di_format = XFS_DINODE_FMT_DEV;
                ip->i_df.if_u2.if_rdev = rdev;
                ip->i_df.if_flags = 0;
                flags |= XFS_ILOG_DEV;
                break;
        case S_IFREG:
                if (pip && xfs_inode_is_filestream(pip)) {
                        error = xfs_filestream_associate(pip, ip);
                        if (error < 0)
                                return -error;
                        if (!error)
                                xfs_iflags_set(ip, XFS_IFILESTREAM);
                }
                /* fall through */
        case S_IFDIR:
                if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
                        uint    di_flags = 0;

                        if ((mode & S_IFMT) == S_IFDIR) {
                                if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
                                        di_flags |= XFS_DIFLAG_RTINHERIT;
                                if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
                                        di_flags |= XFS_DIFLAG_EXTSZINHERIT;
                                        ip->i_d.di_extsize = pip->i_d.di_extsize;
                                }
                        } else if ((mode & S_IFMT) == S_IFREG) {
                                if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
                                        di_flags |= XFS_DIFLAG_REALTIME;
                                if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
                                        di_flags |= XFS_DIFLAG_EXTSIZE;
                                        ip->i_d.di_extsize = pip->i_d.di_extsize;
                                }
                        }
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
                            xfs_inherit_noatime)
                                di_flags |= XFS_DIFLAG_NOATIME;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
                            xfs_inherit_nodump)
                                di_flags |= XFS_DIFLAG_NODUMP;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
                            xfs_inherit_sync)
                                di_flags |= XFS_DIFLAG_SYNC;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
                            xfs_inherit_nosymlinks)
                                di_flags |= XFS_DIFLAG_NOSYMLINKS;
                        if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
                                di_flags |= XFS_DIFLAG_PROJINHERIT;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
                            xfs_inherit_nodefrag)
                                di_flags |= XFS_DIFLAG_NODEFRAG;
                        if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
                                di_flags |= XFS_DIFLAG_FILESTREAM;
                        ip->i_d.di_flags |= di_flags;
                }
                /* FALLTHROUGH */
        case S_IFLNK:
                ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
                ip->i_df.if_flags = XFS_IFEXTENTS;
                ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
                ip->i_df.if_u1.if_extents = NULL;
                break;
        default:
                ASSERT(0);
        }
        /*
         * Attribute fork settings for new inode.
         */
        ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
        ip->i_d.di_anextents = 0;

        /*
         * Log the new values stuffed into the inode.
         */
        xfs_trans_log_inode(tp, ip, flags);

        /* now that we have an i_mode we can setup inode ops and unlock */
        xfs_initialize_vnode(tp->t_mountp, vp, ip);

        *ipp = ip;
        return 0;
}
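
/*
 * Caller-side sketch of the two-phase protocol described above (hedged;
 * see xfs_dir_ialloc() in xfs_utils.c for the canonical loop): when
 * call_again comes back true, commit the transaction while the AGI
 * freelist buffer returned in ialloc_context is still held, reserve a
 * fresh transaction, and invoke xfs_ialloc() a second time.
 */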

/*
 * Check to make sure that there are no blocks allocated to the
 * file beyond the size of the file.  We don't check this for
 * files with fixed size extents or real time extents, but we
 * at least do it for regular files.
 */
#ifdef DEBUG
void
xfs_isize_check(
        xfs_mount_t     *mp,
        xfs_inode_t     *ip,
        xfs_fsize_t     isize)
{
        xfs_fileoff_t   map_first;
        int             nimaps;
        xfs_bmbt_irec_t imaps[2];

        if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
                return;

        if (XFS_IS_REALTIME_INODE(ip))
                return;

        if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
                return;

        nimaps = 2;
        map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
        /*
         * The filesystem could be shutting down, so bmapi may return
         * an error.
         */
        if (xfs_bmapi(NULL, ip, map_first,
                         (XFS_B_TO_FSB(mp,
                                       (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
                          map_first),
                         XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
                         NULL, NULL))
                return;
        ASSERT(nimaps == 1);
        ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
}
#endif  /* DEBUG */

/*
 * Calculate the last possible buffered byte in a file.  This must
 * include data that was buffered beyond the EOF by the write code.
 * This also needs to deal with overflowing the xfs_fsize_t type
 * which can happen for sizes near the limit.
 *
 * We also need to take into account any blocks beyond the EOF.  It
 * may be the case that they were buffered by a write which failed.
 * In that case the pages will still be in memory, but the inode size
 * will never have been updated.
 */
xfs_fsize_t
xfs_file_last_byte(
        xfs_inode_t     *ip)
{
        xfs_mount_t     *mp;
        xfs_fsize_t     last_byte;
        xfs_fileoff_t   last_block;
        xfs_fileoff_t   size_last_block;
        int             error;

        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED));

        mp = ip->i_mount;
        /*
         * Only check for blocks beyond the EOF if the extents have
         * been read in.  This eliminates the need for the inode lock,
         * and it also saves us from looking when it really isn't
         * necessary.
         */
        if (ip->i_df.if_flags & XFS_IFEXTENTS) {
                error = xfs_bmap_last_offset(NULL, ip, &last_block,
                        XFS_DATA_FORK);
                if (error) {
                        last_block = 0;
                }
        } else {
                last_block = 0;
        }
        size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
        last_block = XFS_FILEOFF_MAX(last_block, size_last_block);

        last_byte = XFS_FSB_TO_B(mp, last_block);
        if (last_byte < 0) {
                return XFS_MAXIOFFSET(mp);
        }
        last_byte += (1 << mp->m_writeio_log);
        if (last_byte < 0) {
                return XFS_MAXIOFFSET(mp);
        }
        return last_byte;
}
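
/*
 * Worked example (illustrative values): with 4 KiB filesystem blocks and
 * m_writeio_log = 16 (a 64 KiB write-I/O size), a 10000-byte file with no
 * extents beyond EOF gives size_last_block = 3 and last_byte =
 * 3 * 4096 + 65536 = 77824.  The two "< 0" checks above catch signed
 * overflow for files near the maximum offset.
 */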

#if defined(XFS_RW_TRACE)
STATIC void
xfs_itrunc_trace(
        int             tag,
        xfs_inode_t     *ip,
        int             flag,
        xfs_fsize_t     new_size,
        xfs_off_t       toss_start,
        xfs_off_t       toss_finish)
{
        if (ip->i_rwtrace == NULL) {
                return;
        }

        ktrace_enter(ip->i_rwtrace,
                     (void*)((long)tag),
                     (void*)ip,
                     (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
                     (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
                     (void*)((long)flag),
                     (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
                     (void*)(unsigned long)(new_size & 0xffffffff),
                     (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
                     (void*)(unsigned long)(toss_start & 0xffffffff),
                     (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
                     (void*)(unsigned long)(toss_finish & 0xffffffff),
                     (void*)(unsigned long)current_cpu(),
                     (void*)(unsigned long)current_pid(),
                     (void*)NULL,
                     (void*)NULL,
                     (void*)NULL);
}
#else
#define xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
#endif

/*
 * Start the truncation of the file to new_size.  The new size
 * must be smaller than the current size.  This routine will
 * clear the buffer and page caches of file data in the removed
 * range, and xfs_itruncate_finish() will remove the underlying
 * disk blocks.
 *
 * The inode must have its I/O lock locked EXCLUSIVELY, and it
 * must NOT have the inode lock held at all.  This is because we're
 * calling into the buffer/page cache code and we can't hold the
 * inode lock when we do so.
 *
 * We need to wait for any direct I/Os in flight to complete before we
 * proceed with the truncate.  This is needed to prevent the extents
 * being read or written by the direct I/Os from being removed while the
 * I/O is in flight as there is no other method of synchronising
 * direct I/O with the truncate operation.  Also, because we hold
 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
 * started until the truncate completes and drops the lock.  Essentially,
 * the vn_iowait() call forms an I/O barrier that provides strict ordering
 * between direct I/Os and the truncate operation.
 *
 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
 * or XFS_ITRUNC_MAYBE.  The XFS_ITRUNC_MAYBE value should be used
 * in the case that the caller is locking things out of order and
 * may not be able to call xfs_itruncate_finish() with the inode lock
 * held without dropping the I/O lock.  If the caller must drop the
 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
 * must be called again with all the same restrictions as the initial
 * call.
 */
int
xfs_itruncate_start(
        xfs_inode_t     *ip,
        uint            flags,
        xfs_fsize_t     new_size)
{
        xfs_fsize_t     last_byte;
        xfs_off_t       toss_start;
        xfs_mount_t     *mp;
        int             error = 0;

        ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT((new_size == 0) || (new_size <= ip->i_size));
        ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
               (flags == XFS_ITRUNC_MAYBE));

        mp = ip->i_mount;

        /* wait for the completion of any pending DIOs */
        if (new_size < ip->i_size)
                vn_iowait(ip);

        /*
         * Call toss_pages or flushinval_pages to get rid of pages
         * overlapping the region being removed.  We have to use
         * the less efficient flushinval_pages in the case that the
         * caller may not be able to finish the truncate without
         * dropping the inode's I/O lock.  Make sure
         * to catch any pages brought in by buffers overlapping
         * the EOF by searching out beyond the isize by our
         * block size.  We round new_size up to a block boundary
         * so that we don't toss things on the same block as
         * new_size but before it.
         *
         * Before calling toss_pages or flushinval_pages, make sure to
         * call remapf() over the same region if the file is mapped.
         * This frees up mapped file references to the pages in the
         * given range and for the flushinval_pages case it ensures
         * that we get the latest mapped changes flushed out.
         */
        toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
        toss_start = XFS_FSB_TO_B(mp, toss_start);
        if (toss_start < 0) {
                /*
                 * The place to start tossing is beyond our maximum
                 * file size, so there is no way that the data extended
                 * out there.
                 */
                return 0;
        }
        last_byte = xfs_file_last_byte(ip);
        xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
                         last_byte);
        if (last_byte > toss_start) {
                if (flags & XFS_ITRUNC_DEFINITE) {
                        xfs_tosspages(ip, toss_start,
                                        -1, FI_REMAPF_LOCKED);
                } else {
                        error = xfs_flushinval_pages(ip, toss_start,
                                        -1, FI_REMAPF_LOCKED);
                }
        }

#ifdef DEBUG
        if (new_size == 0) {
                ASSERT(VN_CACHED(VFS_I(ip)) == 0);
        }
#endif
        return error;
}
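
/*
 * Pairing sketch (illustrative; compare truncate callers such as
 * xfs_inactive()): xfs_itruncate_start() runs with only the I/O lock
 * held, after which the caller reserves a permanent transaction, takes
 * the inode lock, joins and holds the inode, and then calls
 * xfs_itruncate_finish() below to free the blocks.
 */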

/*
 * Shrink the file to the given new_size.  The new size must be smaller than
 * the current size.  This will free up the underlying blocks in the removed
 * range after a call to xfs_itruncate_start() or xfs_atruncate_start().
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * The fork parameter must be either xfs_attr_fork or xfs_data_fork, and it
 * indicates the fork which is to be truncated.  For the attribute fork we only
 * support truncation to size 0.
 *
 * We use the sync parameter to indicate whether or not the first transaction
 * we perform might have to be synchronous.  For the attr fork, it needs to be
 * so if the unlink of the inode is not yet known to be permanent in the log.
 * This keeps us from freeing and reusing the blocks of the attribute fork
 * before the unlink of the inode becomes permanent.
 *
 * For the data fork, we normally have to run synchronously if we're being
 * called out of the inactive path or we're being called out of the create path
 * where we're truncating an existing file.  Either way, the truncate needs to
 * be sync so blocks don't reappear in the file with altered data in case of a
 * crash.  wsync filesystems can run the first case async because anything that
 * shrinks the inode has to run sync so by the time we're called here from
 * inactive, the inode size is permanently set to 0.
 *
 * Calls from the truncate path always need to be sync unless we're in a wsync
 * filesystem and the file has already been unlinked.
 *
 * The caller is responsible for correctly setting the sync parameter.  It gets
 * too hard for us to guess here which path we're being called out of just
 * based on inode state.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction.  This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_finish(
        xfs_trans_t     **tp,
        xfs_inode_t     *ip,
        xfs_fsize_t     new_size,
        int             fork,
        int             sync)
{
        xfs_fsblock_t   first_block;
        xfs_fileoff_t   first_unmap_block;
        xfs_fileoff_t   last_block;
        xfs_filblks_t   unmap_len=0;
        xfs_mount_t     *mp;
        xfs_trans_t     *ntp;
        int             done;
        int             committed;
        xfs_bmap_free_t free_list;
        int             error;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
        ASSERT((new_size == 0) || (new_size <= ip->i_size));
        ASSERT(*tp != NULL);
        ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
        ASSERT(ip->i_transp == *tp);
        ASSERT(ip->i_itemp != NULL);
        ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);

        ntp = *tp;
        mp = (ntp)->t_mountp;
        ASSERT(! XFS_NOT_DQATTACHED(mp, ip));

        /*
         * We only support truncating the entire attribute fork.
         */
        if (fork == XFS_ATTR_FORK) {
                new_size = 0LL;
        }
        first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
        xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
        /*
         * The first thing we do is set the size to new_size permanently
         * on disk.  This way we don't have to worry about anyone ever
         * being able to look at the data being freed even in the face
         * of a crash.  What we're getting around here is the case where
         * we free a block, it is allocated to another file, it is written
         * to, and then we crash.  If the new data gets written to the
         * file but the log buffers containing the free and reallocation
         * don't, then we'd end up with garbage in the blocks being freed.
         * As long as we make the new_size permanent before actually
         * freeing any blocks it doesn't matter if they get written to.
         *
         * The callers must signal into us whether or not the size
         * setting here must be synchronous.  There are a few cases
         * where it doesn't have to be synchronous.  Those cases
         * occur if the file is unlinked and we know the unlink is
         * permanent or if the blocks being truncated are guaranteed
         * to be beyond the inode eof (regardless of the link count)
         * and the eof value is permanent.  Both of these cases occur
         * only on wsync-mounted filesystems.  In those cases, we're
         * guaranteed that no user will ever see the data in the blocks
         * that are being truncated so the truncate can run async.
         * In the free beyond eof case, the file may wind up with
         * more blocks allocated to it than it needs if we crash
         * and that won't get fixed until the next time the file
         * is re-opened and closed but that's ok as that shouldn't
         * be too many blocks.
         *
         * However, we can't just make all wsync xactions run async
         * because there's one call out of the create path that needs
         * to run sync where it's truncating an existing file to size
         * 0 whose size is > 0.
         *
         * It's probably possible to come up with a test in this
         * routine that would correctly distinguish all the above
         * cases from the values of the function parameters and the
         * inode state but for sanity's sake, I've decided to let the
         * layers above just tell us.  It's simpler to correctly figure
         * out in the layer above exactly under what conditions we
         * can run async and I think it's easier for others to read and
         * follow the logic in case something has to be changed.
         * cscope is your friend -- rcc.
         *
         * The attribute fork is much simpler.
         *
         * For the attribute fork we allow the caller to tell us whether
         * the unlink of the inode that led to this call is yet permanent
         * in the on disk log.  If it is not and we will be freeing extents
         * in this inode then we make the first transaction synchronous
         * to make sure that the unlink is permanent by the time we free
         * the blocks.
         */
        if (fork == XFS_DATA_FORK) {
                if (ip->i_d.di_nextents > 0) {
                        /*
                         * If we are not changing the file size then do
                         * not update the on-disk file size - we may be
                         * called from xfs_inactive_free_eofblocks().  If we
                         * update the on-disk file size and then the system
                         * crashes before the contents of the file are
                         * flushed to disk then the files may be full of
                         * holes (ie NULL files bug).
                         */
                        if (ip->i_size != new_size) {
                                ip->i_d.di_size = new_size;
                                ip->i_size = new_size;
                                xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
                        }
                }
        } else if (sync) {
                ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
                if (ip->i_d.di_anextents > 0)
                        xfs_trans_set_sync(ntp);
        }
        ASSERT(fork == XFS_DATA_FORK ||
                (fork == XFS_ATTR_FORK &&
                        ((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
                         (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));

        /*
         * Since it is possible for space to become allocated beyond
         * the end of the file (in a crash where the space is allocated
         * but the inode size is not yet updated), simply remove any
         * blocks which show up between the new EOF and the maximum
         * possible file size.  If the first block to be removed is
         * beyond the maximum file size (ie it is the same as last_block),
         * then there is nothing to do.
         */
        last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
        ASSERT(first_unmap_block <= last_block);
        done = 0;
        if (last_block == first_unmap_block) {
                done = 1;
        } else {
                unmap_len = last_block - first_unmap_block + 1;
        }
        while (!done) {
                /*
                 * Free up to XFS_ITRUNC_MAX_EXTENTS extents.  xfs_bunmapi()
                 * will tell us whether it freed the entire range or
                 * not.  If this is a synchronous mount (wsync),
                 * then we can tell bunmapi to keep all the
                 * transactions asynchronous since the unlink
                 * transaction that made this inode inactive has
                 * already hit the disk.  There's no danger of
                 * the freed blocks being reused, there being a
                 * crash, and the reused blocks suddenly reappearing
                 * in this file with garbage in them once recovery
                 * runs.
                 */
                XFS_BMAP_INIT(&free_list, &first_block);
                error = xfs_bunmapi(ntp, ip,
                                    first_unmap_block, unmap_len,
                                    XFS_BMAPI_AFLAG(fork) |
                                      (sync ? 0 : XFS_BMAPI_ASYNC),
                                    XFS_ITRUNC_MAX_EXTENTS,
                                    &first_block, &free_list,
                                    NULL, &done);
                if (error) {
                        /*
                         * If the bunmapi call encounters an error,
                         * return to the caller where the transaction
                         * can be properly aborted.  We just need to
                         * make sure we're not holding any resources
                         * that we were not when we came in.
                         */
                        xfs_bmap_cancel(&free_list);
                        return error;
                }

                /*
                 * Duplicate the transaction that has the permanent
                 * reservation and commit the old transaction.
                 */
                error = xfs_bmap_finish(tp, &free_list, &committed);
                ntp = *tp;
                if (committed) {
                        /* link the inode into the next xact in the chain */
                        xfs_trans_ijoin(ntp, ip,
                                        XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
                        xfs_trans_ihold(ntp, ip);
                }

                if (error) {
                        /*
                         * If the bmap finish call encounters an error, return
                         * to the caller where the transaction can be properly
                         * aborted.  We just need to make sure we're not
                         * holding any resources that we were not when we came
                         * in.
                         *
                         * Aborting from this point might lose some blocks in
                         * the file system, but oh well.
                         */
                        xfs_bmap_cancel(&free_list);
                        return error;
                }

                if (committed) {
                        /*
                         * Mark the inode dirty so it will be logged and
                         * moved forward in the log as part of every commit.
                         */
                        xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
                }

                ntp = xfs_trans_dup(ntp);
                error = xfs_trans_commit(*tp, 0);
                *tp = ntp;

                /* link the inode into the next transaction in the chain */
                xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
                xfs_trans_ihold(ntp, ip);

                if (!error)
                        error = xfs_trans_reserve(ntp, 0,
                                        XFS_ITRUNCATE_LOG_RES(mp), 0,
                                        XFS_TRANS_PERM_LOG_RES,
                                        XFS_ITRUNCATE_LOG_COUNT);
                if (error)
                        return error;
        }
        /*
         * Only update the size in the case of the data fork, but
         * always re-log the inode so that our permanent transaction
         * can keep on rolling it forward in the log.
         */
        if (fork == XFS_DATA_FORK) {
                xfs_isize_check(mp, ip, new_size);
                /*
                 * If we are not changing the file size then do
                 * not update the on-disk file size - we may be
                 * called from xfs_inactive_free_eofblocks().  If we
                 * update the on-disk file size and then the system
                 * crashes before the contents of the file are
                 * flushed to disk then the files may be full of
                 * holes (ie NULL files bug).
                 */
                if (ip->i_size != new_size) {
                        ip->i_d.di_size = new_size;
                        ip->i_size = new_size;
                }
        }
        xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
        ASSERT((new_size != 0) ||
               (fork == XFS_ATTR_FORK) ||
               (ip->i_delayed_blks == 0));
        ASSERT((new_size != 0) ||
               (fork == XFS_ATTR_FORK) ||
               (ip->i_d.di_nextents == 0));
        xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0);
        return 0;
}
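
/*
 * The loop above is the standard XFS "transaction roll": xfs_trans_dup()
 * carries the permanent log reservation into a fresh transaction before
 * the old one is committed, and the inode is re-joined and re-held each
 * time around, so the unmap can span as many transactions as it needs
 * while each individual commit stays within its reservation.
 */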
1765 * This is called when the inode's link count goes to 0.
1766 * We place the on-disk inode on a list in the AGI. It
1767 * will be pulled from this list when the inode is freed.
1770 xfs_iunlink(
1771 xfs_trans_t *tp,
1772 xfs_inode_t *ip)
1774 xfs_mount_t *mp;
1775 xfs_agi_t *agi;
1776 xfs_dinode_t *dip;
1777 xfs_buf_t *agibp;
1778 xfs_buf_t *ibp;
1779 xfs_agnumber_t agno;
1780 xfs_daddr_t agdaddr;
1781 xfs_agino_t agino;
1782 short bucket_index;
1783 int offset;
1784 int error;
1785 int agi_ok;
1787 ASSERT(ip->i_d.di_nlink == 0);
1788 ASSERT(ip->i_d.di_mode != 0);
1789 ASSERT(ip->i_transp == tp);
1791 mp = tp->t_mountp;
1793 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1794 agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
1797 * Get the agi buffer first. It ensures lock ordering
1798 * on the list.
1800 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
1801 XFS_FSS_TO_BB(mp, 1), 0, &agibp);
1802 if (error)
1803 return error;
1806 * Validate the magic number of the agi block.
1808 agi = XFS_BUF_TO_AGI(agibp);
1809 agi_ok =
1810 be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
1811 XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
1812 if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK,
1813 XFS_RANDOM_IUNLINK))) {
1814 XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi);
1815 xfs_trans_brelse(tp, agibp);
1816 return XFS_ERROR(EFSCORRUPTED);
1819 * Get the index into the agi hash table for the
1820 * list this inode will go on.
1822 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1823 ASSERT(agino != 0);
1824 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1825 ASSERT(agi->agi_unlinked[bucket_index]);
1826 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
1828 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) {
1830 * There is already another inode in the bucket we need
1831 * to add ourselves to. Add us at the front of the list.
1832 * Here we put the head pointer into our next pointer,
1833 * and then we fall through to point the head at us.
1835 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
1836 if (error)
1837 return error;
1839 ASSERT(be32_to_cpu(dip->di_next_unlinked) == NULLAGINO);
1840 /* both on-disk, don't endian flip twice */
1841 dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
1842 offset = ip->i_boffset +
1843 offsetof(xfs_dinode_t, di_next_unlinked);
1844 xfs_trans_inode_buf(tp, ibp);
1845 xfs_trans_log_buf(tp, ibp, offset,
1846 (offset + sizeof(xfs_agino_t) - 1));
1847 xfs_inobp_check(mp, ibp);
1851 * Point the bucket head pointer at the inode being inserted.
1853 ASSERT(agino != 0);
1854 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
1855 offset = offsetof(xfs_agi_t, agi_unlinked) +
1856 (sizeof(xfs_agino_t) * bucket_index);
1857 xfs_trans_log_buf(tp, agibp, offset,
1858 (offset + sizeof(xfs_agino_t) - 1));
1859 return 0;
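/*
 * Editor's sketch of the list discipline above: each AGI bucket heads a
 * singly linked list threaded through the inodes' di_next_unlinked
 * fields, terminated by NULLAGINO, and insertion is always at the head.
 * The arrays and sizes below are hypothetical, for illustration only;
 * on disk, each "pointer" update is a logged buffer modification.
 */
#include <assert.h>
#include <stdint.h>

#define SK_NBUCKETS	64		/* stands in for XFS_AGI_UNLINKED_BUCKETS */
#define SK_NULLAGINO	UINT32_MAX	/* list terminator, as in XFS */
#define SK_MAXINO	1024

static uint32_t sk_bucket[SK_NBUCKETS];	/* AGI bucket heads */
static uint32_t sk_next[SK_MAXINO];	/* per-inode di_next_unlinked */

static void sk_unlink_init(void)
{
	for (int i = 0; i < SK_NBUCKETS; i++)
		sk_bucket[i] = SK_NULLAGINO;	/* all lists start empty */
}

static void sk_unlink_insert(uint32_t agino)
{
	uint32_t *head = &sk_bucket[agino % SK_NBUCKETS];

	assert(agino != 0 && *head != agino);
	sk_next[agino] = *head;	/* take the old head (may be NULLAGINO) */
	*head = agino;		/* bucket now points at the new inode */
}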
1863 * Pull the on-disk inode from the AGI unlinked list.
1865 STATIC int
1866 xfs_iunlink_remove(
1867 xfs_trans_t *tp,
1868 xfs_inode_t *ip)
1870 xfs_ino_t next_ino;
1871 xfs_mount_t *mp;
1872 xfs_agi_t *agi;
1873 xfs_dinode_t *dip;
1874 xfs_buf_t *agibp;
1875 xfs_buf_t *ibp;
1876 xfs_agnumber_t agno;
1877 xfs_daddr_t agdaddr;
1878 xfs_agino_t agino;
1879 xfs_agino_t next_agino;
1880 xfs_buf_t *last_ibp;
1881 xfs_dinode_t *last_dip = NULL;
1882 short bucket_index;
1883 int offset, last_offset = 0;
1884 int error;
1885 int agi_ok;
1888 * First pull the on-disk inode from the AGI unlinked list.
1890 mp = tp->t_mountp;
1892 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1893 agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
1896 * Get the agi buffer first. It ensures lock ordering
1897 * on the list.
1899 error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
1900 XFS_FSS_TO_BB(mp, 1), 0, &agibp);
1901 if (error) {
1902 cmn_err(CE_WARN,
1903 "xfs_iunlink_remove: xfs_trans_read_buf() returned an error %d on %s. Returning error.",
1904 error, mp->m_fsname);
1905 return error;
1908 * Validate the magic number of the agi block.
1910 agi = XFS_BUF_TO_AGI(agibp);
1911 agi_ok =
1912 be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
1913 XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
1914 if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE,
1915 XFS_RANDOM_IUNLINK_REMOVE))) {
1916 XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW,
1917 mp, agi);
1918 xfs_trans_brelse(tp, agibp);
1919 cmn_err(CE_WARN,
1920 "xfs_iunlink_remove: XFS_TEST_ERROR() returned an error on %s. Returning EFSCORRUPTED.",
1921 mp->m_fsname);
1922 return XFS_ERROR(EFSCORRUPTED);
1925 * Get the index into the agi hash table for the
1926 * list this inode is on.
1928 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1929 ASSERT(agino != 0);
1930 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1931 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO);
1932 ASSERT(agi->agi_unlinked[bucket_index]);
1934 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
1936 * We're at the head of the list. Get the inode's
1937 * on-disk buffer to see if there is anyone after us
1938 * on the list. Only modify our next pointer if it
1939 * is not already NULLAGINO. This saves us the overhead
1940 * of dealing with the buffer when there is no need to
1941 * change it.
1943 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
1944 if (error) {
1945 cmn_err(CE_WARN,
1946 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
1947 error, mp->m_fsname);
1948 return error;
1950 next_agino = be32_to_cpu(dip->di_next_unlinked);
1951 ASSERT(next_agino != 0);
1952 if (next_agino != NULLAGINO) {
1953 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
1954 offset = ip->i_boffset +
1955 offsetof(xfs_dinode_t, di_next_unlinked);
1956 xfs_trans_inode_buf(tp, ibp);
1957 xfs_trans_log_buf(tp, ibp, offset,
1958 (offset + sizeof(xfs_agino_t) - 1));
1959 xfs_inobp_check(mp, ibp);
1960 } else {
1961 xfs_trans_brelse(tp, ibp);
1964 * Point the bucket head pointer at the next inode.
1966 ASSERT(next_agino != 0);
1967 ASSERT(next_agino != agino);
1968 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
1969 offset = offsetof(xfs_agi_t, agi_unlinked) +
1970 (sizeof(xfs_agino_t) * bucket_index);
1971 xfs_trans_log_buf(tp, agibp, offset,
1972 (offset + sizeof(xfs_agino_t) - 1));
1973 } else {
1975 * We need to search the list for the inode being freed.
1977 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1978 last_ibp = NULL;
1979 while (next_agino != agino) {
1981 * If the last inode wasn't the one pointing to
1982 * us, then release its buffer since we're not
1983 * going to do anything with it.
1985 if (last_ibp != NULL) {
1986 xfs_trans_brelse(tp, last_ibp);
1988 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
1989 error = xfs_inotobp(mp, tp, next_ino, &last_dip,
1990 &last_ibp, &last_offset);
1991 if (error) {
1992 cmn_err(CE_WARN,
1993 "xfs_iunlink_remove: xfs_inotobp() returned an error %d on %s. Returning error.",
1994 error, mp->m_fsname);
1995 return error;
1997 next_agino = be32_to_cpu(last_dip->di_next_unlinked);
1998 ASSERT(next_agino != NULLAGINO);
1999 ASSERT(next_agino != 0);
2002 * Now last_ibp points to the buffer previous to us on
2003 * the unlinked list. Pull us from the list.
2005 error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
2006 if (error) {
2007 cmn_err(CE_WARN,
2008 "xfs_iunlink_remove: xfs_itobp() returned an error %d on %s. Returning error.",
2009 error, mp->m_fsname);
2010 return error;
2012 next_agino = be32_to_cpu(dip->di_next_unlinked);
2013 ASSERT(next_agino != 0);
2014 ASSERT(next_agino != agino);
2015 if (next_agino != NULLAGINO) {
2016 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
2017 offset = ip->i_boffset +
2018 offsetof(xfs_dinode_t, di_next_unlinked);
2019 xfs_trans_inode_buf(tp, ibp);
2020 xfs_trans_log_buf(tp, ibp, offset,
2021 (offset + sizeof(xfs_agino_t) - 1));
2022 xfs_inobp_check(mp, ibp);
2023 } else {
2024 xfs_trans_brelse(tp, ibp);
2027 * Point the previous inode on the list to the next inode.
2029 last_dip->di_next_unlinked = cpu_to_be32(next_agino);
2030 ASSERT(next_agino != 0);
2031 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
2032 xfs_trans_inode_buf(tp, last_ibp);
2033 xfs_trans_log_buf(tp, last_ibp, offset,
2034 (offset + sizeof(xfs_agino_t) - 1));
2035 xfs_inobp_check(mp, last_ibp);
2037 return 0;
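/*
 * Editor's sketch of the removal above, using the same hypothetical
 * arrays as the insertion sketch: if the inode is the bucket head,
 * repoint the head at its successor; otherwise walk the chain keeping
 * the predecessor and splice the inode out. In the real code every
 * step of this walk is a buffer read (xfs_inotobp()), which is why the
 * previous buffer is released on each iteration.
 */
static void sk_unlink_remove(uint32_t agino)
{
	uint32_t *head = &sk_bucket[agino % SK_NBUCKETS];
	uint32_t prev;

	if (*head == agino) {			/* head of the list */
		*head = sk_next[agino];
		sk_next[agino] = SK_NULLAGINO;
		return;
	}
	for (prev = *head; sk_next[prev] != agino; prev = sk_next[prev])
		assert(sk_next[prev] != SK_NULLAGINO);	/* must be on the list */

	sk_next[prev] = sk_next[agino];		/* splice out */
	sk_next[agino] = SK_NULLAGINO;
}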
2040 STATIC void
2041 xfs_ifree_cluster(
2042 xfs_inode_t *free_ip,
2043 xfs_trans_t *tp,
2044 xfs_ino_t inum)
2046 xfs_mount_t *mp = free_ip->i_mount;
2047 int blks_per_cluster;
2048 int nbufs;
2049 int ninodes;
2050 int i, j, found, pre_flushed;
2051 xfs_daddr_t blkno;
2052 xfs_buf_t *bp;
2053 xfs_inode_t *ip, **ip_found;
2054 xfs_inode_log_item_t *iip;
2055 xfs_log_item_t *lip;
2056 xfs_perag_t *pag = xfs_get_perag(mp, inum);
2058 if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
2059 blks_per_cluster = 1;
2060 ninodes = mp->m_sb.sb_inopblock;
2061 nbufs = XFS_IALLOC_BLOCKS(mp);
2062 } else {
2063 blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
2064 mp->m_sb.sb_blocksize;
2065 ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
2066 nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
2069 ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS);
2071 for (j = 0; j < nbufs; j++, inum += ninodes) {
2072 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2073 XFS_INO_TO_AGBNO(mp, inum));
2077 * Look for each inode in memory and attempt to lock it;
2078 * we can be racing with flush and tail pushing here.
2079 * Any inode we get the locks on gets added to an array of
2080 * inode items to process later.
2082 * If we get the buffer lock first, we could beat a flush
2083 * or tail-pushing thread to the lock here, in which
2084 * case they will go looking for the inode buffer
2085 * and fail; we need some other form of interlock
2086 * here.
2088 found = 0;
2089 for (i = 0; i < ninodes; i++) {
2090 read_lock(&pag->pag_ici_lock);
2091 ip = radix_tree_lookup(&pag->pag_ici_root,
2092 XFS_INO_TO_AGINO(mp, (inum + i)));
2094 /* Inode not in memory or we found it already,
2095 * nothing to do
2097 if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
2098 read_unlock(&pag->pag_ici_lock);
2099 continue;
2102 if (xfs_inode_clean(ip)) {
2103 read_unlock(&pag->pag_ici_lock);
2104 continue;
2107 /* If we can get the locks then add it to the
2108 * list, otherwise by the time we get the bp lock
2109 * below it will already be attached to the
2110 * inode buffer.
2113 /* This inode will already be locked - by us; let's
2114 * keep it that way.
2117 if (ip == free_ip) {
2118 if (xfs_iflock_nowait(ip)) {
2119 xfs_iflags_set(ip, XFS_ISTALE);
2120 if (xfs_inode_clean(ip)) {
2121 xfs_ifunlock(ip);
2122 } else {
2123 ip_found[found++] = ip;
2126 read_unlock(&pag->pag_ici_lock);
2127 continue;
2130 if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2131 if (xfs_iflock_nowait(ip)) {
2132 xfs_iflags_set(ip, XFS_ISTALE);
2134 if (xfs_inode_clean(ip)) {
2135 xfs_ifunlock(ip);
2136 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2137 } else {
2138 ip_found[found++] = ip;
2140 } else {
2141 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2144 read_unlock(&pag->pag_ici_lock);
2147 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2148 mp->m_bsize * blks_per_cluster,
2149 XFS_BUF_LOCK);
2151 pre_flushed = 0;
2152 lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
2153 while (lip) {
2154 if (lip->li_type == XFS_LI_INODE) {
2155 iip = (xfs_inode_log_item_t *)lip;
2156 ASSERT(iip->ili_logged == 1);
2157 lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
2158 spin_lock(&mp->m_ail_lock);
2159 iip->ili_flush_lsn = iip->ili_item.li_lsn;
2160 spin_unlock(&mp->m_ail_lock);
2161 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2162 pre_flushed++;
2164 lip = lip->li_bio_list;
2167 for (i = 0; i < found; i++) {
2168 ip = ip_found[i];
2169 iip = ip->i_itemp;
2171 if (!iip) {
2172 ip->i_update_core = 0;
2173 xfs_ifunlock(ip);
2174 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2175 continue;
2178 iip->ili_last_fields = iip->ili_format.ilf_fields;
2179 iip->ili_format.ilf_fields = 0;
2180 iip->ili_logged = 1;
2181 spin_lock(&mp->m_ail_lock);
2182 iip->ili_flush_lsn = iip->ili_item.li_lsn;
2183 spin_unlock(&mp->m_ail_lock);
2185 xfs_buf_attach_iodone(bp,
2186 (void(*)(xfs_buf_t*,xfs_log_item_t*))
2187 xfs_istale_done, (xfs_log_item_t *)iip);
2188 if (ip != free_ip) {
2189 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2193 if (found || pre_flushed)
2194 xfs_trans_stale_inode_buf(tp, bp);
2195 xfs_trans_binval(tp, bp);
2198 kmem_free(ip_found);
2199 xfs_put_perag(mp, pag);
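/*
 * Editor's sketch of the geometry computed at the top of
 * xfs_ifree_cluster(): when a filesystem block holds a whole inode
 * cluster, each buffer covers one block; otherwise one buffer spans
 * the blocks of one cluster. The example values in the comment are
 * hypothetical.
 */
#include <stdio.h>

static void sk_cluster_geometry(int blocksize, int cluster_size,
				int inopblock, int ialloc_blks)
{
	int blks_per_cluster, ninodes, nbufs;

	if (blocksize >= cluster_size) {
		blks_per_cluster = 1;
		ninodes = inopblock;
		nbufs = ialloc_blks;
	} else {
		blks_per_cluster = cluster_size / blocksize;
		ninodes = blks_per_cluster * inopblock;
		nbufs = ialloc_blks / blks_per_cluster;
	}
	/* e.g. 4k blocks, 8k clusters, 16 inodes/block, 4-block chunks:
	 * 2 blocks/cluster, 32 inodes/buffer, 2 buffers per chunk */
	printf("%d blk/cluster, %d inodes/buf, %d buf(s)\n",
	       blks_per_cluster, ninodes, nbufs);
}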
2203 * This is called to return an inode to the inode free list.
2204 * The inode should already be truncated to 0 length and have
2205 * no pages associated with it. This routine also assumes that
2206 * the inode is already a part of the transaction.
2208 * The on-disk copy of the inode will have been added to the list
2209 * of unlinked inodes in the AGI. We need to remove the inode from
2210 * that list atomically with respect to freeing it here.
2213 xfs_ifree(
2214 xfs_trans_t *tp,
2215 xfs_inode_t *ip,
2216 xfs_bmap_free_t *flist)
2218 int error;
2219 int delete;
2220 xfs_ino_t first_ino;
2221 xfs_dinode_t *dip;
2222 xfs_buf_t *ibp;
2224 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2225 ASSERT(ip->i_transp == tp);
2226 ASSERT(ip->i_d.di_nlink == 0);
2227 ASSERT(ip->i_d.di_nextents == 0);
2228 ASSERT(ip->i_d.di_anextents == 0);
2229 ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
2230 ((ip->i_d.di_mode & S_IFMT) != S_IFREG));
2231 ASSERT(ip->i_d.di_nblocks == 0);
2234 * Pull the on-disk inode from the AGI unlinked list.
2236 error = xfs_iunlink_remove(tp, ip);
2237 if (error != 0) {
2238 return error;
2241 error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
2242 if (error != 0) {
2243 return error;
2245 ip->i_d.di_mode = 0; /* mark incore inode as free */
2246 ip->i_d.di_flags = 0;
2247 ip->i_d.di_dmevmask = 0;
2248 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
2249 ip->i_df.if_ext_max =
2250 XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
2251 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2252 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2254 * Bump the generation count so no one will be confused
2255 * by reincarnations of this inode.
2257 ip->i_d.di_gen++;
2259 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2261 error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, 0, 0, XFS_BUF_LOCK);
2262 if (error)
2263 return error;
2266 * Clear the on-disk di_mode. This is to prevent xfs_bulkstat
2267 * from picking up this inode when it is reclaimed (its incore state
2268 * initialized but not flushed to disk yet). The in-core di_mode is
2269 * already cleared and a corresponding transaction logged.
2270 * The hack here just synchronizes the in-core to on-disk
2271 * di_mode value in advance before the actual inode sync to disk.
2272 * This is OK because the inode is already unlinked and would never
2273 * change its di_mode again for this inode generation.
2274 * This is a temporary hack that would require a proper fix
2275 * in the future.
2277 dip->di_core.di_mode = 0;
2279 if (delete) {
2280 xfs_ifree_cluster(ip, tp, first_ino);
2283 return 0;
2287 * Reallocate the space for if_broot based on the number of records
2288 * being added or deleted as indicated in rec_diff. Move the records
2289 * and pointers in if_broot to fit the new size. When shrinking this
2290 * will eliminate holes between the records and pointers created by
2291 * the caller. When growing this will create holes to be filled in
2292 * by the caller.
2294 * The caller must not request to add more records than would fit in
2295 * the on-disk inode root. If the if_broot is currently NULL, then
2296 * if we are adding records, one will be allocated. The caller must also
2297 * not request that the number of records go below zero, although
2298 * it can go to zero.
2300 * ip -- the inode whose if_broot area is changing
2301 * ext_diff -- the change in the number of records, positive or negative,
2302 * requested for the if_broot array.
2304 void
2305 xfs_iroot_realloc(
2306 xfs_inode_t *ip,
2307 int rec_diff,
2308 int whichfork)
2310 int cur_max;
2311 xfs_ifork_t *ifp;
2312 xfs_bmbt_block_t *new_broot;
2313 int new_max;
2314 size_t new_size;
2315 char *np;
2316 char *op;
2319 * Handle the degenerate case quietly.
2321 if (rec_diff == 0) {
2322 return;
2325 ifp = XFS_IFORK_PTR(ip, whichfork);
2326 if (rec_diff > 0) {
2328 * If there wasn't any memory allocated before, just
2329 * allocate it now and get out.
2331 if (ifp->if_broot_bytes == 0) {
2332 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
2333 ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size,
2334 KM_SLEEP);
2335 ifp->if_broot_bytes = (int)new_size;
2336 return;
2340 * If there is already an existing if_broot, then we need
2341 * to realloc() it and shift the pointers to their new
2342 * location. The records don't change location because
2343 * they are kept butted up against the btree block header.
2345 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2346 new_max = cur_max + rec_diff;
2347 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2348 ifp->if_broot = (xfs_bmbt_block_t *)
2349 kmem_realloc(ifp->if_broot,
2350 new_size,
2351 (size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
2352 KM_SLEEP);
2353 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2354 ifp->if_broot_bytes);
2355 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2356 (int)new_size);
2357 ifp->if_broot_bytes = (int)new_size;
2358 ASSERT(ifp->if_broot_bytes <=
2359 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2360 memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
2361 return;
2365 * rec_diff is less than 0. In this case, we are shrinking the
2366 * if_broot buffer. It must already exist. If we go to zero
2367 * records, just get rid of the root and clear the status bit.
2369 ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
2370 cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2371 new_max = cur_max + rec_diff;
2372 ASSERT(new_max >= 0);
2373 if (new_max > 0)
2374 new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2375 else
2376 new_size = 0;
2377 if (new_size > 0) {
2378 new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP);
2380 * First copy over the btree block header.
2382 memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t));
2383 } else {
2384 new_broot = NULL;
2385 ifp->if_flags &= ~XFS_IFBROOT;
2389 * Only copy the records and pointers if there are any.
2391 if (new_max > 0) {
2393 * First copy the records.
2395 op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1,
2396 ifp->if_broot_bytes);
2397 np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1,
2398 (int)new_size);
2399 memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
2402 * Then copy the pointers.
2404 op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2405 ifp->if_broot_bytes);
2406 np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1,
2407 (int)new_size);
2408 memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
2410 kmem_free(ifp->if_broot);
2411 ifp->if_broot = new_broot;
2412 ifp->if_broot_bytes = (int)new_size;
2413 ASSERT(ifp->if_broot_bytes <=
2414 XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2415 return;
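/*
 * Editor's sketch of the grow path above: in a btree root block the
 * records sit right after the header and the child pointers sit at the
 * end of the buffer, so growing the buffer leaves the records in place
 * but the pointer array must be memmove()d to its new offset. The
 * layout constants and helpers here are simplified stand-ins for
 * XFS_BMAP_BROOT_SPACE_CALC() and XFS_BMAP_BROOT_PTR_ADDR().
 */
#include <stdlib.h>
#include <string.h>

#define SK_HDR	24u	/* hypothetical block-header size */
#define SK_REC	16u	/* one record */
#define SK_PTR	8u	/* one child pointer */

static size_t sk_broot_space(int nrecs)
{
	return SK_HDR + (size_t)nrecs * (SK_REC + SK_PTR);
}

static char *sk_broot_ptrs(char *buf, size_t bytes)
{
	int maxrecs = (int)((bytes - SK_HDR) / (SK_REC + SK_PTR));

	/* pointers live after the header plus the full record area */
	return buf + SK_HDR + (size_t)maxrecs * SK_REC;
}

static char *sk_grow_broot(char *buf, int cur_max, int rec_diff)
{
	size_t old_size = sk_broot_space(cur_max);
	size_t new_size = sk_broot_space(cur_max + rec_diff);
	char *nb, *op, *np;

	nb = realloc(buf, new_size);	/* grow path: rec_diff > 0 */
	if (!nb)
		abort();
	op = sk_broot_ptrs(nb, old_size);	/* old pointer offset */
	np = sk_broot_ptrs(nb, new_size);	/* new pointer offset */
	memmove(np, op, (size_t)cur_max * SK_PTR); /* records stay put */
	return nb;
}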
2420 * This is called when the amount of space needed for if_data
2421 * is increased or decreased. The change in size is indicated by
2422 * the number of bytes that need to be added or deleted in the
2423 * byte_diff parameter.
2425 * If the amount of space needed has decreased below the size of the
2426 * inline buffer, then switch to using the inline buffer. Otherwise,
2427 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
2428 * to what is needed.
2430 * ip -- the inode whose if_data area is changing
2431 * byte_diff -- the change in the number of bytes, positive or negative,
2432 * requested for the if_data array.
2434 void
2435 xfs_idata_realloc(
2436 xfs_inode_t *ip,
2437 int byte_diff,
2438 int whichfork)
2440 xfs_ifork_t *ifp;
2441 int new_size;
2442 int real_size;
2444 if (byte_diff == 0) {
2445 return;
2448 ifp = XFS_IFORK_PTR(ip, whichfork);
2449 new_size = (int)ifp->if_bytes + byte_diff;
2450 ASSERT(new_size >= 0);
2452 if (new_size == 0) {
2453 if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2454 kmem_free(ifp->if_u1.if_data);
2456 ifp->if_u1.if_data = NULL;
2457 real_size = 0;
2458 } else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
2460 * If the valid extents/data can fit in if_inline_ext/data,
2461 * copy them from the malloc'd vector and free it.
2463 if (ifp->if_u1.if_data == NULL) {
2464 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2465 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2466 ASSERT(ifp->if_real_bytes != 0);
2467 memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
2468 new_size);
2469 kmem_free(ifp->if_u1.if_data);
2470 ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2472 real_size = 0;
2473 } else {
2475 * Stuck with malloc/realloc.
2476 * For inline data, the underlying buffer must be
2477 * a multiple of 4 bytes in size so that it can be
2478 * logged and stay on word boundaries. We enforce
2479 * that here.
2481 real_size = roundup(new_size, 4);
2482 if (ifp->if_u1.if_data == NULL) {
2483 ASSERT(ifp->if_real_bytes == 0);
2484 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2485 } else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2487 * Only do the realloc if the underlying size
2488 * is really changing.
2490 if (ifp->if_real_bytes != real_size) {
2491 ifp->if_u1.if_data =
2492 kmem_realloc(ifp->if_u1.if_data,
2493 real_size,
2494 ifp->if_real_bytes,
2495 KM_SLEEP);
2497 } else {
2498 ASSERT(ifp->if_real_bytes == 0);
2499 ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2500 memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
2501 ifp->if_bytes);
2504 ifp->if_real_bytes = real_size;
2505 ifp->if_bytes = new_size;
2506 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
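/*
 * Editor's note in code form: the "multiple of 4 bytes" rule above is
 * just roundup(new_size, 4), so a logged inline-data buffer always
 * starts and ends on a word boundary. A standalone equivalent:
 */
#include <assert.h>

static int sk_roundup4(int n)
{
	return (n + 3) & ~3;	/* same as roundup(n, 4) for n >= 0 */
}

static void sk_roundup4_check(void)
{
	assert(sk_roundup4(1) == 4 && sk_roundup4(4) == 4);
	assert(sk_roundup4(5) == 8 && sk_roundup4(8) == 8);
}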
2513 * Map inode to disk block and offset.
2515 * mp -- the mount point structure for the current file system
2516 * tp -- the current transaction
2517 * ino -- the inode number of the inode to be located
2518 * imap -- this structure is filled in with the information necessary
2519 * to retrieve the given inode from disk
2520 * flags -- flags to pass to xfs_dilocate indicating whether or not
2521 * lookups in the inode btree were OK or not
2524 xfs_imap(
2525 xfs_mount_t *mp,
2526 xfs_trans_t *tp,
2527 xfs_ino_t ino,
2528 xfs_imap_t *imap,
2529 uint flags)
2531 xfs_fsblock_t fsbno;
2532 int len;
2533 int off;
2534 int error;
2536 fsbno = imap->im_blkno ?
2537 XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK;
2538 error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags);
2539 if (error)
2540 return error;
2542 imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno);
2543 imap->im_len = XFS_FSB_TO_BB(mp, len);
2544 imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno);
2545 imap->im_ioffset = (ushort)off;
2546 imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog);
2549 * If the inode number maps to a block outside the bounds
2550 * of the file system then return an error rather than calling
2551 * read_buf and panicking when we get an error from the
2552 * driver.
2554 if ((imap->im_blkno + imap->im_len) >
2555 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
2556 xfs_fs_cmn_err(CE_ALERT, mp, "xfs_imap: "
2557 "(imap->im_blkno (0x%llx) + imap->im_len (0x%llx)) > "
2558 " XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) (0x%llx)",
2559 (unsigned long long) imap->im_blkno,
2560 (unsigned long long) imap->im_len,
2561 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
2562 return EINVAL;
2564 return 0;
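/*
 * Editor's sketch of the sanity check above on a simplified inode map:
 * the byte offset of the inode is its index within the cluster shifted
 * by log2(inode size), and any map that would extend past the end of
 * the data device is rejected before a read is ever issued. The struct
 * and names are hypothetical stand-ins for xfs_imap_t.
 */
#include <errno.h>
#include <stdint.h>

struct sk_imap {
	uint64_t blkno;		/* daddr of the cluster (512-byte units) */
	uint64_t len;		/* length in 512-byte units */
	uint32_t boffset;	/* byte offset of the inode in the cluster */
};

static int sk_imap_check(const struct sk_imap *im, uint64_t dev_len_bb,
			 int off, int inodelog)
{
	if (im->boffset != (uint32_t)(off << inodelog))
		return EINVAL;	/* offset and index disagree */
	if (im->blkno + im->len > dev_len_bb)
		return EINVAL;	/* map runs off the end of the device */
	return 0;
}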
2567 void
2568 xfs_idestroy_fork(
2569 xfs_inode_t *ip,
2570 int whichfork)
2572 xfs_ifork_t *ifp;
2574 ifp = XFS_IFORK_PTR(ip, whichfork);
2575 if (ifp->if_broot != NULL) {
2576 kmem_free(ifp->if_broot);
2577 ifp->if_broot = NULL;
2581 * If the format is local, then we can't have an extents
2582 * array so just look for an inline data array. If we're
2583 * not local then we may or may not have an extents list,
2584 * so check and free it up if we do.
2586 if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2587 if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2588 (ifp->if_u1.if_data != NULL)) {
2589 ASSERT(ifp->if_real_bytes != 0);
2590 kmem_free(ifp->if_u1.if_data);
2591 ifp->if_u1.if_data = NULL;
2592 ifp->if_real_bytes = 0;
2594 } else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2595 ((ifp->if_flags & XFS_IFEXTIREC) ||
2596 ((ifp->if_u1.if_extents != NULL) &&
2597 (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2598 ASSERT(ifp->if_real_bytes != 0);
2599 xfs_iext_destroy(ifp);
2601 ASSERT(ifp->if_u1.if_extents == NULL ||
2602 ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2603 ASSERT(ifp->if_real_bytes == 0);
2604 if (whichfork == XFS_ATTR_FORK) {
2605 kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2606 ip->i_afp = NULL;
2611 * This is called to free all the memory associated with an inode.
2612 * It must free the inode itself and any buffers allocated for
2613 * if_extents/if_data and if_broot. It must also free the lock
2614 * associated with the inode.
2616 void
2617 xfs_idestroy(
2618 xfs_inode_t *ip)
2620 switch (ip->i_d.di_mode & S_IFMT) {
2621 case S_IFREG:
2622 case S_IFDIR:
2623 case S_IFLNK:
2624 xfs_idestroy_fork(ip, XFS_DATA_FORK);
2625 break;
2627 if (ip->i_afp)
2628 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
2629 mrfree(&ip->i_lock);
2630 mrfree(&ip->i_iolock);
2631 freesema(&ip->i_flock);
2633 #ifdef XFS_INODE_TRACE
2634 ktrace_free(ip->i_trace);
2635 #endif
2636 #ifdef XFS_BMAP_TRACE
2637 ktrace_free(ip->i_xtrace);
2638 #endif
2639 #ifdef XFS_BMBT_TRACE
2640 ktrace_free(ip->i_btrace);
2641 #endif
2642 #ifdef XFS_RW_TRACE
2643 ktrace_free(ip->i_rwtrace);
2644 #endif
2645 #ifdef XFS_ILOCK_TRACE
2646 ktrace_free(ip->i_lock_trace);
2647 #endif
2648 #ifdef XFS_DIR2_TRACE
2649 ktrace_free(ip->i_dir_trace);
2650 #endif
2651 if (ip->i_itemp) {
2653 * Only if we are shutting down the fs will we see an
2654 * inode still in the AIL. If it is there, we should remove
2655 * it to prevent a use-after-free from occurring.
2657 xfs_mount_t *mp = ip->i_mount;
2658 xfs_log_item_t *lip = &ip->i_itemp->ili_item;
2660 ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
2661 XFS_FORCED_SHUTDOWN(ip->i_mount));
2662 if (lip->li_flags & XFS_LI_IN_AIL) {
2663 spin_lock(&mp->m_ail_lock);
2664 if (lip->li_flags & XFS_LI_IN_AIL)
2665 xfs_trans_delete_ail(mp, lip);
2666 else
2667 spin_unlock(&mp->m_ail_lock);
2669 xfs_inode_item_destroy(ip);
2671 kmem_zone_free(xfs_inode_zone, ip);
2676 * Increment the pin count of the given inode.
2677 * The count is updated atomically via i_pincount.
2679 void
2680 xfs_ipin(
2681 xfs_inode_t *ip)
2683 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2685 atomic_inc(&ip->i_pincount);
2689 * Decrement the pin count of the given inode, and wake up
2690 * anyone in xfs_iunpin_wait() if the count goes to 0. The
2691 * inode must have been previously pinned with a call to xfs_ipin().
2693 void
2694 xfs_iunpin(
2695 xfs_inode_t *ip)
2697 ASSERT(atomic_read(&ip->i_pincount) > 0);
2699 if (atomic_dec_and_test(&ip->i_pincount))
2700 wake_up(&ip->i_ipin_wait);
2704 * This is called to unpin an inode. It can be directed to wait or to return
2705 * immediately without waiting for the inode to be unpinned. The caller must
2706 * have the inode locked in at least shared mode so that the inode cannot be
2707 * subsequently pinned once someone is waiting for it to be unpinned.
2709 STATIC void
2710 __xfs_iunpin_wait(
2711 xfs_inode_t *ip,
2712 int wait)
2714 xfs_inode_log_item_t *iip = ip->i_itemp;
2716 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2717 if (atomic_read(&ip->i_pincount) == 0)
2718 return;
2720 /* Give the log a push to start the unpinning I/O */
2721 xfs_log_force(ip->i_mount, (iip && iip->ili_last_lsn) ?
2722 iip->ili_last_lsn : 0, XFS_LOG_FORCE);
2723 if (wait)
2724 wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
2727 static inline void
2728 xfs_iunpin_wait(
2729 xfs_inode_t *ip)
2731 __xfs_iunpin_wait(ip, 1);
2734 static inline void
2735 xfs_iunpin_nowait(
2736 xfs_inode_t *ip)
2738 __xfs_iunpin_wait(ip, 0);
2743 * xfs_iextents_copy()
2745 * This is called to copy the REAL extents (as opposed to the delayed
2746 * allocation extents) from the inode into the given buffer. It
2747 * returns the number of bytes copied into the buffer.
2749 * Delayed allocation extents exist only in memory, so we examine
2750 * each extent in turn and copy only the real ones; there is no
2751 * memcpy() fast path here even when none of the extents are delayed.
2754 xfs_iextents_copy(
2755 xfs_inode_t *ip,
2756 xfs_bmbt_rec_t *dp,
2757 int whichfork)
2759 int copied;
2760 int i;
2761 xfs_ifork_t *ifp;
2762 int nrecs;
2763 xfs_fsblock_t start_block;
2765 ifp = XFS_IFORK_PTR(ip, whichfork);
2766 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2767 ASSERT(ifp->if_bytes > 0);
2769 nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2770 XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
2771 ASSERT(nrecs > 0);
2774 * There may be delayed allocation extents in the
2775 * inode, so copy the extents one at a time and skip
2776 * the delayed ones. There must be at least one
2777 * non-delayed extent.
2779 copied = 0;
2780 for (i = 0; i < nrecs; i++) {
2781 xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
2782 start_block = xfs_bmbt_get_startblock(ep);
2783 if (ISNULLSTARTBLOCK(start_block)) {
2785 * It's a delayed allocation extent, so skip it.
2787 continue;
2790 /* Translate to on disk format */
2791 put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
2792 put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
2793 dp++;
2794 copied++;
2796 ASSERT(copied != 0);
2797 xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));
2799 return (copied * (uint)sizeof(xfs_bmbt_rec_t));
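/*
 * Editor's sketch of the filter-copy above: walk the in-core records,
 * skip the delayed-allocation ones (marked here by a sentinel start
 * block), and write the rest out big-endian as the on-disk format
 * requires. glibc's htobe64() from <endian.h> stands in for
 * cpu_to_be64(); the record layout and sk_is_delayed() test are
 * hypothetical simplifications of xfs_bmbt_rec_host_t and
 * ISNULLSTARTBLOCK().
 */
#include <endian.h>
#include <stddef.h>
#include <stdint.h>

struct sk_rec { uint64_t l0, l1; };

#define SK_DELAYED	UINT64_MAX	/* sentinel start block, sketch only */

static int sk_is_delayed(const struct sk_rec *r)
{
	return r->l0 == SK_DELAYED;
}

static size_t sk_copy_real_extents(const struct sk_rec *in, size_t nrecs,
				   struct sk_rec *out)
{
	size_t copied = 0;

	for (size_t i = 0; i < nrecs; i++) {
		if (sk_is_delayed(&in[i]))
			continue;	/* delayed extents never hit disk */
		out[copied].l0 = htobe64(in[i].l0);
		out[copied].l1 = htobe64(in[i].l1);
		copied++;
	}
	return copied * sizeof(struct sk_rec);	/* bytes, as above */
}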
2803 * Each of the following cases stores data into the same region
2804 * of the on-disk inode, so only one of them can be valid at
2805 * any given time. While it is possible to have conflicting formats
2806 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
2807 * in EXTENTS format, this can only happen when the fork has
2808 * changed formats after being modified but before being flushed.
2809 * In these cases, the format always takes precedence, because the
2810 * format indicates the current state of the fork.
2812 /*ARGSUSED*/
2813 STATIC void
2814 xfs_iflush_fork(
2815 xfs_inode_t *ip,
2816 xfs_dinode_t *dip,
2817 xfs_inode_log_item_t *iip,
2818 int whichfork,
2819 xfs_buf_t *bp)
2821 char *cp;
2822 xfs_ifork_t *ifp;
2823 xfs_mount_t *mp;
2824 #ifdef XFS_TRANS_DEBUG
2825 int first;
2826 #endif
2827 static const short brootflag[2] =
2828 { XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
2829 static const short dataflag[2] =
2830 { XFS_ILOG_DDATA, XFS_ILOG_ADATA };
2831 static const short extflag[2] =
2832 { XFS_ILOG_DEXT, XFS_ILOG_AEXT };
2834 if (!iip)
2835 return;
2836 ifp = XFS_IFORK_PTR(ip, whichfork);
2838 * This can happen if we gave up in iformat in an error path,
2839 * for the attribute fork.
2841 if (!ifp) {
2842 ASSERT(whichfork == XFS_ATTR_FORK);
2843 return;
2845 cp = XFS_DFORK_PTR(dip, whichfork);
2846 mp = ip->i_mount;
2847 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
2848 case XFS_DINODE_FMT_LOCAL:
2849 if ((iip->ili_format.ilf_fields & dataflag[whichfork]) &&
2850 (ifp->if_bytes > 0)) {
2851 ASSERT(ifp->if_u1.if_data != NULL);
2852 ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2853 memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
2855 break;
2857 case XFS_DINODE_FMT_EXTENTS:
2858 ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2859 !(iip->ili_format.ilf_fields & extflag[whichfork]));
2860 ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) ||
2861 (ifp->if_bytes == 0));
2862 ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) ||
2863 (ifp->if_bytes > 0));
2864 if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
2865 (ifp->if_bytes > 0)) {
2866 ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
2867 (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
2868 whichfork);
2870 break;
2872 case XFS_DINODE_FMT_BTREE:
2873 if ((iip->ili_format.ilf_fields & brootflag[whichfork]) &&
2874 (ifp->if_broot_bytes > 0)) {
2875 ASSERT(ifp->if_broot != NULL);
2876 ASSERT(ifp->if_broot_bytes <=
2877 (XFS_IFORK_SIZE(ip, whichfork) +
2878 XFS_BROOT_SIZE_ADJ));
2879 xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes,
2880 (xfs_bmdr_block_t *)cp,
2881 XFS_DFORK_SIZE(dip, mp, whichfork));
2883 break;
2885 case XFS_DINODE_FMT_DEV:
2886 if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
2887 ASSERT(whichfork == XFS_DATA_FORK);
2888 dip->di_u.di_dev = cpu_to_be32(ip->i_df.if_u2.if_rdev);
2890 break;
2892 case XFS_DINODE_FMT_UUID:
2893 if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
2894 ASSERT(whichfork == XFS_DATA_FORK);
2895 memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid,
2896 sizeof(uuid_t));
2898 break;
2900 default:
2901 ASSERT(0);
2902 break;
2906 STATIC int
2907 xfs_iflush_cluster(
2908 xfs_inode_t *ip,
2909 xfs_buf_t *bp)
2911 xfs_mount_t *mp = ip->i_mount;
2912 xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
2913 unsigned long first_index, mask;
2914 unsigned long inodes_per_cluster;
2915 int ilist_size;
2916 xfs_inode_t **ilist;
2917 xfs_inode_t *iq;
2918 int nr_found;
2919 int clcount = 0;
2920 int bufwasdelwri;
2921 int i;
2923 ASSERT(pag->pagi_inodeok);
2924 ASSERT(pag->pag_ici_init);
2926 inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
2927 ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
2928 ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
2929 if (!ilist)
2930 return 0;
2932 mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
2933 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
2934 read_lock(&pag->pag_ici_lock);
2935 /* really need a gang lookup range call here */
2936 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
2937 first_index, inodes_per_cluster);
2938 if (nr_found == 0)
2939 goto out_free;
2941 for (i = 0; i < nr_found; i++) {
2942 iq = ilist[i];
2943 if (iq == ip)
2944 continue;
2945 /* if the inode lies outside this cluster, we're done. */
2946 if ((XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index)
2947 break;
2949 * Do an un-protected check to see if the inode is dirty and
2950 * is a candidate for flushing. These checks will be repeated
2951 * later after the appropriate locks are acquired.
2953 if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
2954 continue;
2957 * Try to get locks. If any are unavailable or it is pinned,
2958 * then this inode cannot be flushed and is skipped.
2961 if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
2962 continue;
2963 if (!xfs_iflock_nowait(iq)) {
2964 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2965 continue;
2967 if (xfs_ipincount(iq)) {
2968 xfs_ifunlock(iq);
2969 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2970 continue;
2974 * Arriving here means that this inode can be flushed. First
2975 * re-check that it's dirty before flushing.
2977 if (!xfs_inode_clean(iq)) {
2978 int error;
2979 error = xfs_iflush_int(iq, bp);
2980 if (error) {
2981 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2982 goto cluster_corrupt_out;
2984 clcount++;
2985 } else {
2986 xfs_ifunlock(iq);
2988 xfs_iunlock(iq, XFS_ILOCK_SHARED);
2991 if (clcount) {
2992 XFS_STATS_INC(xs_icluster_flushcnt);
2993 XFS_STATS_ADD(xs_icluster_flushinode, clcount);
2996 out_free:
2997 read_unlock(&pag->pag_ici_lock);
2998 kmem_free(ilist);
2999 return 0;
3002 cluster_corrupt_out:
3004 * Corruption detected in the clustering loop. Invalidate the
3005 * inode buffer and shut down the filesystem.
3007 read_unlock(&pag->pag_ici_lock);
3009 * Clean up the buffer. If it was B_DELWRI, just release it --
3010 * brelse can handle it with no problems. If not, shut down the
3011 * filesystem before releasing the buffer.
3013 bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp);
3014 if (bufwasdelwri)
3015 xfs_buf_relse(bp);
3017 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3019 if (!bufwasdelwri) {
3021 * Just like incore_relse: if we have b_iodone functions,
3022 * mark the buffer as an error and call them. Otherwise
3023 * mark it as stale and brelse.
3025 if (XFS_BUF_IODONE_FUNC(bp)) {
3026 XFS_BUF_CLR_BDSTRAT_FUNC(bp);
3027 XFS_BUF_UNDONE(bp);
3028 XFS_BUF_STALE(bp);
3029 XFS_BUF_SHUT(bp);
3030 XFS_BUF_ERROR(bp,EIO);
3031 xfs_biodone(bp);
3032 } else {
3033 XFS_BUF_STALE(bp);
3034 xfs_buf_relse(bp);
3039 * Unlocks the flush lock
3041 xfs_iflush_abort(iq);
3042 kmem_free(ilist);
3043 return XFS_ERROR(EFSCORRUPTED);
3047 * xfs_iflush() will write a modified inode's changes out to the
3048 * inode's on-disk home. The caller must have the inode lock held
3049 * in at least shared mode and the inode flush semaphore must be
3050 * held as well. The inode lock will still be held upon return from
3051 * the call and the caller is free to unlock it.
3052 * The inode flush lock will be unlocked when the inode reaches the disk.
3053 * The flags indicate how the inode's buffer should be written out.
3056 xfs_iflush(
3057 xfs_inode_t *ip,
3058 uint flags)
3060 xfs_inode_log_item_t *iip;
3061 xfs_buf_t *bp;
3062 xfs_dinode_t *dip;
3063 xfs_mount_t *mp;
3064 int error;
3065 int noblock = (flags == XFS_IFLUSH_ASYNC_NOBLOCK);
3066 enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
3068 XFS_STATS_INC(xs_iflush_count);
3070 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3071 ASSERT(issemalocked(&(ip->i_flock)));
3072 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3073 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3075 iip = ip->i_itemp;
3076 mp = ip->i_mount;
3079 * If the inode isn't dirty, then just release the inode
3080 * flush lock and do nothing.
3082 if (xfs_inode_clean(ip)) {
3083 xfs_ifunlock(ip);
3084 return 0;
3088 * We can't flush the inode until it is unpinned, so wait for it if we
3089 * are allowed to block. We know no one new can pin it, because we are
3090 * holding the inode lock shared and you need to hold it exclusively to
3091 * pin the inode.
3093 * If we are not allowed to block, force the log out asynchronously so
3094 * that when we come back the inode will be unpinned. If other inodes
3095 * in the same cluster are dirty, they will probably write the inode
3096 * out for us if they occur after the log force completes.
3098 if (noblock && xfs_ipincount(ip)) {
3099 xfs_iunpin_nowait(ip);
3100 xfs_ifunlock(ip);
3101 return EAGAIN;
3103 xfs_iunpin_wait(ip);
3106 * This may have been unpinned because the filesystem is shutting
3107 * down forcibly. If that's the case we must not write this inode
3108 * to disk, because the log record didn't make it to disk!
3110 if (XFS_FORCED_SHUTDOWN(mp)) {
3111 ip->i_update_core = 0;
3112 if (iip)
3113 iip->ili_format.ilf_fields = 0;
3114 xfs_ifunlock(ip);
3115 return XFS_ERROR(EIO);
3119 * Decide how buffer will be flushed out. This is done before
3120 * the call to xfs_iflush_int because this field is zeroed by it.
3122 if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3124 * Flush out the inode buffer according to the directions
3125 * of the caller. In the cases where the caller has given
3126 * us a choice, choose the non-delwri case. This is because
3127 * the inode is in the AIL and we need to get it out soon.
3129 switch (flags) {
3130 case XFS_IFLUSH_SYNC:
3131 case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3132 flags = 0;
3133 break;
3134 case XFS_IFLUSH_ASYNC_NOBLOCK:
3135 case XFS_IFLUSH_ASYNC:
3136 case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3137 flags = INT_ASYNC;
3138 break;
3139 case XFS_IFLUSH_DELWRI:
3140 flags = INT_DELWRI;
3141 break;
3142 default:
3143 ASSERT(0);
3144 flags = 0;
3145 break;
3147 } else {
3148 switch (flags) {
3149 case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3150 case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3151 case XFS_IFLUSH_DELWRI:
3152 flags = INT_DELWRI;
3153 break;
3154 case XFS_IFLUSH_ASYNC_NOBLOCK:
3155 case XFS_IFLUSH_ASYNC:
3156 flags = INT_ASYNC;
3157 break;
3158 case XFS_IFLUSH_SYNC:
3159 flags = 0;
3160 break;
3161 default:
3162 ASSERT(0);
3163 flags = 0;
3164 break;
3169 * Get the buffer containing the on-disk inode.
3171 error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0,
3172 noblock ? XFS_BUF_TRYLOCK : XFS_BUF_LOCK);
3173 if (error || !bp) {
3174 xfs_ifunlock(ip);
3175 return error;
3179 * First flush out the inode that xfs_iflush was called with.
3181 error = xfs_iflush_int(ip, bp);
3182 if (error)
3183 goto corrupt_out;
3186 * If the buffer is pinned then push on the log now so we won't
3187 * get stuck waiting in the write for too long.
3189 if (XFS_BUF_ISPINNED(bp))
3190 xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
3193 * inode clustering:
3194 * see if other inodes can be gathered into this write
3196 error = xfs_iflush_cluster(ip, bp);
3197 if (error)
3198 goto cluster_corrupt_out;
3200 if (flags & INT_DELWRI) {
3201 xfs_bdwrite(mp, bp);
3202 } else if (flags & INT_ASYNC) {
3203 error = xfs_bawrite(mp, bp);
3204 } else {
3205 error = xfs_bwrite(mp, bp);
3207 return error;
3209 corrupt_out:
3210 xfs_buf_relse(bp);
3211 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3212 cluster_corrupt_out:
3214 * Unlocks the flush lock
3216 xfs_iflush_abort(ip);
3217 return XFS_ERROR(EFSCORRUPTED);
3221 STATIC int
3222 xfs_iflush_int(
3223 xfs_inode_t *ip,
3224 xfs_buf_t *bp)
3226 xfs_inode_log_item_t *iip;
3227 xfs_dinode_t *dip;
3228 xfs_mount_t *mp;
3229 #ifdef XFS_TRANS_DEBUG
3230 int first;
3231 #endif
3233 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3234 ASSERT(issemalocked(&(ip->i_flock)));
3235 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3236 ip->i_d.di_nextents > ip->i_df.if_ext_max);
3238 iip = ip->i_itemp;
3239 mp = ip->i_mount;
3243 * If the inode isn't dirty, then just release the inode
3244 * flush lock and do nothing.
3246 if (xfs_inode_clean(ip)) {
3247 xfs_ifunlock(ip);
3248 return 0;
3251 /* point dip at the inode's place in the buffer */
3252 dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset);
3255 * Clear i_update_core before copying out the data.
3256 * This is for coordination with our timestamp updates
3257 * that don't hold the inode lock. They will always
3258 * update the timestamps BEFORE setting i_update_core,
3259 * so if we clear i_update_core after they set it we
3260 * are guaranteed to see their updates to the timestamps.
3261 * I believe that this depends on strongly ordered memory
3262 * semantics, but we have that. We use the SYNCHRONIZE
3263 * macro to make sure that the compiler does not reorder
3264 * the i_update_core access below the data copy below.
3266 ip->i_update_core = 0;
3267 SYNCHRONIZE();
3270 * Make sure to get the latest atime from the Linux inode.
3272 xfs_synchronize_atime(ip);
3274 if (XFS_TEST_ERROR(be16_to_cpu(dip->di_core.di_magic) != XFS_DINODE_MAGIC,
3275 mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
3276 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3277 "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p",
3278 ip->i_ino, be16_to_cpu(dip->di_core.di_magic), dip);
3279 goto corrupt_out;
3281 if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
3282 mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
3283 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3284 "xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
3285 ip->i_ino, ip, ip->i_d.di_magic);
3286 goto corrupt_out;
3288 if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
3289 if (XFS_TEST_ERROR(
3290 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3291 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3292 mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
3293 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3294 "xfs_iflush: Bad regular inode %Lu, ptr 0x%p",
3295 ip->i_ino, ip);
3296 goto corrupt_out;
3298 } else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
3299 if (XFS_TEST_ERROR(
3300 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3301 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3302 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3303 mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
3304 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3305 "xfs_iflush: Bad directory inode %Lu, ptr 0x%p",
3306 ip->i_ino, ip);
3307 goto corrupt_out;
3310 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3311 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
3312 XFS_RANDOM_IFLUSH_5)) {
3313 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3314 "xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p",
3315 ip->i_ino,
3316 ip->i_d.di_nextents + ip->i_d.di_anextents,
3317 ip->i_d.di_nblocks,
3318 ip);
3319 goto corrupt_out;
3321 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3322 mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
3323 xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3324 "xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
3325 ip->i_ino, ip->i_d.di_forkoff, ip);
3326 goto corrupt_out;
3329 * bump the flush iteration count, used to detect flushes which
3330 * postdate a log record during recovery.
3333 ip->i_d.di_flushiter++;
3336 * Copy the dirty parts of the inode into the on-disk
3337 * inode. We always copy out the core of the inode,
3338 * because if the inode is dirty at all the core must
3339 * be.
3341 xfs_dinode_to_disk(&dip->di_core, &ip->i_d);
3343 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3344 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3345 ip->i_d.di_flushiter = 0;
3348 * If this is really an old format inode and the superblock version
3349 * has not been updated to support only new format inodes, then
3350 * convert back to the old inode format. If the superblock version
3351 * has been updated, then make the conversion permanent.
3353 ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 ||
3354 xfs_sb_version_hasnlink(&mp->m_sb));
3355 if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
3356 if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
3358 * Convert it back.
3360 ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
3361 dip->di_core.di_onlink = cpu_to_be16(ip->i_d.di_nlink);
3362 } else {
3364 * The superblock version has already been bumped,
3365 * so just make the conversion to the new inode
3366 * format permanent.
3368 ip->i_d.di_version = XFS_DINODE_VERSION_2;
3369 dip->di_core.di_version = XFS_DINODE_VERSION_2;
3370 ip->i_d.di_onlink = 0;
3371 dip->di_core.di_onlink = 0;
3372 memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
3373 memset(&(dip->di_core.di_pad[0]), 0,
3374 sizeof(dip->di_core.di_pad));
3375 ASSERT(ip->i_d.di_projid == 0);
3379 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
3380 if (XFS_IFORK_Q(ip))
3381 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
3382 xfs_inobp_check(mp, bp);
3385 * We've recorded everything logged in the inode, so we'd
3386 * like to clear the ilf_fields bits so we don't log and
3387 * flush things unnecessarily. However, we can't stop
3388 * logging all this information until the data we've copied
3389 * into the disk buffer is written to disk. If we did we might
3390 * overwrite the copy of the inode in the log with all the
3391 * data after re-logging only part of it, and in the face of
3392 * a crash we wouldn't have all the data we need to recover.
3394 * What we do is move the bits to the ili_last_fields field.
3395 * When logging the inode, these bits are moved back to the
3396 * ilf_fields field. In the xfs_iflush_done() routine we
3397 * clear ili_last_fields, since we know that the information
3398 * those bits represent is permanently on disk. As long as
3399 * the flush completes before the inode is logged again, then
3400 * both ilf_fields and ili_last_fields will be cleared.
3402 * We can play with the ilf_fields bits here, because the inode
3403 * lock must be held exclusively in order to set bits there
3404 * and the flush lock protects the ili_last_fields bits.
3405 * Set ili_logged so the flush done
3406 * routine can tell whether or not to look in the AIL.
3407 * Also, store the current LSN of the inode so that we can tell
3408 * whether the item has moved in the AIL from xfs_iflush_done().
3409 * In order to read the lsn we need the AIL lock, because
3410 * it is a 64 bit value that cannot be read atomically.
3412 if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3413 iip->ili_last_fields = iip->ili_format.ilf_fields;
3414 iip->ili_format.ilf_fields = 0;
3415 iip->ili_logged = 1;
3417 ASSERT(sizeof(xfs_lsn_t) == 8); /* don't lock if it shrinks */
3418 spin_lock(&mp->m_ail_lock);
3419 iip->ili_flush_lsn = iip->ili_item.li_lsn;
3420 spin_unlock(&mp->m_ail_lock);
3423 * Attach the function xfs_iflush_done to the inode's
3424 * buffer. This will remove the inode from the AIL
3425 * and unlock the inode's flush lock when the inode is
3426 * completely written to disk.
3428 xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*))
3429 xfs_iflush_done, (xfs_log_item_t *)iip);
3431 ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
3432 ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
3433 } else {
3435 * We're flushing an inode which is not in the AIL and has
3436 * not been logged but has i_update_core set. For this
3437 * case we can use a B_DELWRI flush and immediately drop
3438 * the inode flush lock because we can avoid the whole
3439 * AIL state thing. It's OK to drop the flush lock now,
3440 * because we've already locked the buffer and to do anything
3441 * you really need both.
3443 if (iip != NULL) {
3444 ASSERT(iip->ili_logged == 0);
3445 ASSERT(iip->ili_last_fields == 0);
3446 ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
3448 xfs_ifunlock(ip);
3451 return 0;
3453 corrupt_out:
3454 return XFS_ERROR(EFSCORRUPTED);
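/*
 * Editor's sketch of the ilf_fields/ili_last_fields handoff described
 * in the long comment above: the dirty-field mask is moved, never
 * dropped, so a crash before the buffer I/O completes still finds
 * everything it needs in the log, and only the flush-done callback
 * retires the saved mask. Field names mirror the inode log item; all
 * locking is elided.
 */
#include <stdint.h>

struct sk_ili {
	uint32_t ilf_fields;		/* must go in the next log write */
	uint32_t ili_last_fields;	/* covered by the in-flight flush */
	int ili_logged;
};

static void sk_flush_start(struct sk_ili *iip)
{
	iip->ili_last_fields = iip->ilf_fields;	/* hand off, don't clear */
	iip->ilf_fields = 0;
	iip->ili_logged = 1;
}

static void sk_flush_done(struct sk_ili *iip)
{
	iip->ili_last_fields = 0;	/* safely on disk; forget it */
}

static void sk_relog(struct sk_ili *iip, uint32_t newbits)
{
	/* logging again mid-flush brings the saved bits back */
	iip->ilf_fields |= iip->ili_last_fields | newbits;
}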
3459 * Flush all inactive inodes in mp.
3461 void
3462 xfs_iflush_all(
3463 xfs_mount_t *mp)
3465 xfs_inode_t *ip;
3467 again:
3468 XFS_MOUNT_ILOCK(mp);
3469 ip = mp->m_inodes;
3470 if (ip == NULL)
3471 goto out;
3473 do {
3474 /* Make sure we skip markers inserted by sync */
3475 if (ip->i_mount == NULL) {
3476 ip = ip->i_mnext;
3477 continue;
3480 if (!VFS_I(ip)) {
3481 XFS_MOUNT_IUNLOCK(mp);
3482 xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
3483 goto again;
3486 ASSERT(vn_count(VFS_I(ip)) == 0);
3488 ip = ip->i_mnext;
3489 } while (ip != mp->m_inodes);
3490 out:
3491 XFS_MOUNT_IUNLOCK(mp);
3494 #ifdef XFS_ILOCK_TRACE
3495 ktrace_t *xfs_ilock_trace_buf;
3497 void
3498 xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
3500 ktrace_enter(ip->i_lock_trace,
3501 (void *)ip,
3502 (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */
3503 (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */
3504 (void *)ra, /* caller of ilock */
3505 (void *)(unsigned long)current_cpu(),
3506 (void *)(unsigned long)current_pid(),
3507 NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
3509 #endif
3512 * Return a pointer to the extent record at file index idx.
3514 xfs_bmbt_rec_host_t *
3515 xfs_iext_get_ext(
3516 xfs_ifork_t *ifp, /* inode fork pointer */
3517 xfs_extnum_t idx) /* index of target extent */
3519 ASSERT(idx >= 0);
3520 if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
3521 return ifp->if_u1.if_ext_irec->er_extbuf;
3522 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3523 xfs_ext_irec_t *erp; /* irec pointer */
3524 int erp_idx = 0; /* irec index */
3525 xfs_extnum_t page_idx = idx; /* ext index in target list */
3527 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3528 return &erp->er_extbuf[page_idx];
3529 } else if (ifp->if_bytes) {
3530 return &ifp->if_u1.if_extents[idx];
3531 } else {
3532 return NULL;
3537 * Insert new item(s) into the extent records for incore inode
3538 * fork 'ifp'. 'count' new items are inserted at index 'idx'.
3540 void
3541 xfs_iext_insert(
3542 xfs_ifork_t *ifp, /* inode fork pointer */
3543 xfs_extnum_t idx, /* starting index of new items */
3544 xfs_extnum_t count, /* number of inserted items */
3545 xfs_bmbt_irec_t *new) /* items to insert */
3547 xfs_extnum_t i; /* extent record index */
3549 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3550 xfs_iext_add(ifp, idx, count);
3551 for (i = idx; i < idx + count; i++, new++)
3552 xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
3556 * This is called when the amount of space required for incore file
3557 * extents needs to be increased. The ext_diff parameter stores the
3558 * number of new extents being added and the idx parameter contains
3559 * the extent index where the new extents will be added. If the new
3560 * extents are being appended, then we just need to (re)allocate and
3561 * initialize the space. Otherwise, if the new extents are being
3562 * inserted into the middle of the existing entries, a bit more work
3563 * is required to make room for the new extents to be inserted. The
3564 * caller is responsible for filling in the new extent entries upon
3565 * return.
3567 void
3568 xfs_iext_add(
3569 xfs_ifork_t *ifp, /* inode fork pointer */
3570 xfs_extnum_t idx, /* index to begin adding exts */
3571 int ext_diff) /* number of extents to add */
3573 int byte_diff; /* new bytes being added */
3574 int new_size; /* size of extents after adding */
3575 xfs_extnum_t nextents; /* number of extents in file */
3577 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3578 ASSERT((idx >= 0) && (idx <= nextents));
3579 byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
3580 new_size = ifp->if_bytes + byte_diff;
3582 * If the new number of extents (nextents + ext_diff)
3583 * fits inside the inode, then continue to use the inline
3584 * extent buffer.
3586 if (nextents + ext_diff <= XFS_INLINE_EXTS) {
3587 if (idx < nextents) {
3588 memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
3589 &ifp->if_u2.if_inline_ext[idx],
3590 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3591 memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
3593 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3594 ifp->if_real_bytes = 0;
3595 ifp->if_lastex = nextents + ext_diff;
3598 * Otherwise use a linear (direct) extent list.
3599 * If the extents are currently inside the inode,
3600 * xfs_iext_realloc_direct will switch us from
3601 * inline to direct extent allocation mode.
3603 else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
3604 xfs_iext_realloc_direct(ifp, new_size);
3605 if (idx < nextents) {
3606 memmove(&ifp->if_u1.if_extents[idx + ext_diff],
3607 &ifp->if_u1.if_extents[idx],
3608 (nextents - idx) * sizeof(xfs_bmbt_rec_t));
3609 memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
3612 /* Indirection array */
3613 else {
3614 xfs_ext_irec_t *erp;
3615 int erp_idx = 0;
3616 int page_idx = idx;
3618 ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
3619 if (ifp->if_flags & XFS_IFEXTIREC) {
3620 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
3621 } else {
3622 xfs_iext_irec_init(ifp);
3623 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3624 erp = ifp->if_u1.if_ext_irec;
3626 /* Extents fit in target extent page */
3627 if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
3628 if (page_idx < erp->er_extcount) {
3629 memmove(&erp->er_extbuf[page_idx + ext_diff],
3630 &erp->er_extbuf[page_idx],
3631 (erp->er_extcount - page_idx) *
3632 sizeof(xfs_bmbt_rec_t));
3633 memset(&erp->er_extbuf[page_idx], 0, byte_diff);
3635 erp->er_extcount += ext_diff;
3636 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3638 /* Insert a new extent page */
3639 else if (erp) {
3640 xfs_iext_add_indirect_multi(ifp,
3641 erp_idx, page_idx, ext_diff);
3644 * If extent(s) are being appended to the last page in
3645 * the indirection array and the new extent(s) don't fit
3646 * in the page, then erp is NULL and erp_idx is set to
3647 * the next index needed in the indirection array.
3649 else {
3650 int count = ext_diff;
3652 while (count) {
3653 erp = xfs_iext_irec_new(ifp, erp_idx);
3654 erp->er_extcount = count;
3655 count -= MIN(count, (int)XFS_LINEAR_EXTS);
3656 if (count) {
3657 erp_idx++;
3662 ifp->if_bytes = new_size;
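/*
 * Editor's sketch of the three-tier policy xfs_iext_add() implements:
 * stay inline while the extents fit in the inode, use one contiguous
 * (re)allocated array up to the linear limit, and switch to the paged
 * indirection array beyond that. The thresholds below are hypothetical
 * stand-ins for XFS_INLINE_EXTS and XFS_LINEAR_EXTS.
 */
enum sk_layout { SK_INLINE, SK_DIRECT, SK_INDIRECT };

#define SK_INLINE_EXTS	2	/* fits inside the inode */
#define SK_LINEAR_EXTS	256	/* one contiguous buffer's worth */

static enum sk_layout sk_pick_layout(int nextents, int ext_diff)
{
	int total = nextents + ext_diff;

	if (total <= SK_INLINE_EXTS)
		return SK_INLINE;
	if (total <= SK_LINEAR_EXTS)
		return SK_DIRECT;
	return SK_INDIRECT;
}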
3666 * This is called when incore extents are being added to the indirection
3667 * array and the new extents do not fit in the target extent list. The
3668 * erp_idx parameter contains the irec index for the target extent list
3669 * in the indirection array, and the idx parameter contains the extent
3670 * index within the list. The number of extents being added is stored
3671 * in the count parameter.
3673 * |-------| |-------|
3674 * | | | | idx - number of extents before idx
3675 * | idx | | count |
3676 * | | | | count - number of extents being inserted at idx
3677 * |-------| |-------|
3678 * | count | | nex2 | nex2 - number of extents after idx + count
3679 * |-------| |-------|
3681 void
3682 xfs_iext_add_indirect_multi(
3683 xfs_ifork_t *ifp, /* inode fork pointer */
3684 int erp_idx, /* target extent irec index */
3685 xfs_extnum_t idx, /* index within target list */
3686 int count) /* new extents being added */
3688 int byte_diff; /* new bytes being added */
3689 xfs_ext_irec_t *erp; /* pointer to irec entry */
3690 xfs_extnum_t ext_diff; /* number of extents to add */
3691 xfs_extnum_t ext_cnt; /* new extents still needed */
3692 xfs_extnum_t nex2; /* extents after idx + count */
3693 xfs_bmbt_rec_t *nex2_ep = NULL; /* temp list for nex2 extents */
3694 int nlists; /* number of irec's (lists) */
3696 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3697 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3698 nex2 = erp->er_extcount - idx;
3699 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3701 /*
3702 * Save second part of target extent list
3703 * (all extents past idx). */
3704 if (nex2) {
3705 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3706 nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
3707 memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3708 erp->er_extcount -= nex2;
3709 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
3710 memset(&erp->er_extbuf[idx], 0, byte_diff);
3711 }
3713 /*
3714 * Add the new extents to the end of the target
3715 * list, then allocate new irec record(s) and
3716 * extent buffer(s) as needed to store the rest
3717 * of the new extents.
3718 */
3719 ext_cnt = count;
3720 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
3721 if (ext_diff) {
3722 erp->er_extcount += ext_diff;
3723 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3724 ext_cnt -= ext_diff;
3725 }
3726 while (ext_cnt) {
3727 erp_idx++;
3728 erp = xfs_iext_irec_new(ifp, erp_idx);
3729 ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
3730 erp->er_extcount = ext_diff;
3731 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3732 ext_cnt -= ext_diff;
3733 }
3735 /* Add nex2 extents back to indirection array */
3736 if (nex2) {
3737 xfs_extnum_t ext_avail;
3738 int i;
3740 byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3741 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
3742 i = 0;
3743 /*
3744 * If nex2 extents fit in the current page, append
3745 * nex2_ep after the new extents.
3746 */
3747 if (nex2 <= ext_avail) {
3748 i = erp->er_extcount;
3749 }
3750 /*
3751 * Otherwise, check if space is available in the
3752 * next page.
3753 */
3754 else if ((erp_idx < nlists - 1) &&
3755 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
3756 ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
3757 erp_idx++;
3758 erp++;
3759 /* Create a hole for nex2 extents */
3760 memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
3761 erp->er_extcount * sizeof(xfs_bmbt_rec_t));
3762 }
3763 /*
3764 * Final choice, create a new extent page for
3765 * nex2 extents.
3766 */
3767 else {
3768 erp_idx++;
3769 erp = xfs_iext_irec_new(ifp, erp_idx);
3770 }
3771 memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
3772 kmem_free(nex2_ep);
3773 erp->er_extcount += nex2;
3774 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
3775 }
3776 }
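/*
 * [Editor's sketch, not part of xfs_inode.c] The fill loop above in
 * miniature: given a target page holding er_extcount records, report
 * how many of `count` new records land in it and how many fresh pages
 * xfs_iext_irec_new() would be asked for. LINEAR_EXTS is an assumed
 * per-page capacity.
 */
#include <stdio.h>

#define LINEAR_EXTS	256
#define MIN(a, b)	((a) < (b) ? (a) : (b))

static void plan_insert(int er_extcount, int count)
{
	int ext_cnt = count;
	int in_target = MIN(ext_cnt, LINEAR_EXTS - er_extcount);
	int new_pages = 0;

	ext_cnt -= in_target;
	while (ext_cnt) {	/* one xfs_iext_irec_new() per pass */
		ext_cnt -= MIN(ext_cnt, LINEAR_EXTS);
		new_pages++;
	}
	printf("%d into the target page, %d new page(s)\n",
	       in_target, new_pages);
}

int main(void)
{
	plan_insert(200, 100);	/* 56 fit, the rest spills to 1 new page */
	plan_insert(256, 600);	/* target full: 0 fit, 3 new pages */
	return 0;
}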
3778 /*
3779 * This is called when the amount of space required for incore file
3780 * extents needs to be decreased. The ext_diff parameter stores the
3781 * number of extents to be removed and the idx parameter contains
3782 * the extent index where the extents will be removed from.
3784 * If the amount of space needed has decreased below the linear
3785 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
3786 * extent array. Otherwise, use kmem_realloc() to adjust the
3787 * size to what is needed.
3788 */
3789 void
3790 xfs_iext_remove(
3791 xfs_ifork_t *ifp, /* inode fork pointer */
3792 xfs_extnum_t idx, /* index to begin removing exts */
3793 int ext_diff) /* number of extents to remove */
3794 {
3795 xfs_extnum_t nextents; /* number of extents in file */
3796 int new_size; /* size of extents after removal */
3798 ASSERT(ext_diff > 0);
3799 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3800 new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
3802 if (new_size == 0) {
3803 xfs_iext_destroy(ifp);
3804 } else if (ifp->if_flags & XFS_IFEXTIREC) {
3805 xfs_iext_remove_indirect(ifp, idx, ext_diff);
3806 } else if (ifp->if_real_bytes) {
3807 xfs_iext_remove_direct(ifp, idx, ext_diff);
3808 } else {
3809 xfs_iext_remove_inline(ifp, idx, ext_diff);
3810 }
3811 ifp->if_bytes = new_size;
3812 }
3814 /*
3815 * This removes ext_diff extents from the inline buffer, beginning
3816 * at extent index idx.
3817 */
3818 void
3819 xfs_iext_remove_inline(
3820 xfs_ifork_t *ifp, /* inode fork pointer */
3821 xfs_extnum_t idx, /* index to begin removing exts */
3822 int ext_diff) /* number of extents to remove */
3823 {
3824 int nextents; /* number of extents in file */
3826 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3827 ASSERT(idx < XFS_INLINE_EXTS);
3828 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3829 ASSERT(((nextents - ext_diff) > 0) &&
3830 (nextents - ext_diff) < XFS_INLINE_EXTS);
3832 if (idx + ext_diff < nextents) {
3833 memmove(&ifp->if_u2.if_inline_ext[idx],
3834 &ifp->if_u2.if_inline_ext[idx + ext_diff],
3835 (nextents - (idx + ext_diff)) *
3836 sizeof(xfs_bmbt_rec_t));
3837 memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
3838 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3839 } else {
3840 memset(&ifp->if_u2.if_inline_ext[idx], 0,
3841 ext_diff * sizeof(xfs_bmbt_rec_t));
3842 }
3843 }
3845 /*
3846 * This removes ext_diff extents from a linear (direct) extent list,
3847 * beginning at extent index idx. If the extents are being removed
3848 * from the end of the list (i.e. truncate) then we just need to re-
3849 * allocate the list to remove the extra space. Otherwise, if the
3850 * extents are being removed from the middle of the existing extent
3851 * entries, then we first need to move the extent records beginning
3852 * at idx + ext_diff up in the list to overwrite the records being
3853 * removed, then remove the extra space via kmem_realloc.
3854 */
3855 void
3856 xfs_iext_remove_direct(
3857 xfs_ifork_t *ifp, /* inode fork pointer */
3858 xfs_extnum_t idx, /* index to begin removing exts */
3859 int ext_diff) /* number of extents to remove */
3860 {
3861 xfs_extnum_t nextents; /* number of extents in file */
3862 int new_size; /* size of extents after removal */
3864 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
3865 new_size = ifp->if_bytes -
3866 (ext_diff * sizeof(xfs_bmbt_rec_t));
3867 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3869 if (new_size == 0) {
3870 xfs_iext_destroy(ifp);
3871 return;
3872 }
3873 /* Move extents up in the list (if needed) */
3874 if (idx + ext_diff < nextents) {
3875 memmove(&ifp->if_u1.if_extents[idx],
3876 &ifp->if_u1.if_extents[idx + ext_diff],
3877 (nextents - (idx + ext_diff)) *
3878 sizeof(xfs_bmbt_rec_t));
3879 }
3880 memset(&ifp->if_u1.if_extents[nextents - ext_diff],
3881 0, ext_diff * sizeof(xfs_bmbt_rec_t));
3882 /*
3883 * Reallocate the direct extent list. If the extents
3884 * will fit inside the inode then xfs_iext_realloc_direct
3885 * will switch from direct to inline extent allocation
3886 * mode for us.
3887 */
3888 xfs_iext_realloc_direct(ifp, new_size);
3889 ifp->if_bytes = new_size;
3890 }
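/*
 * [Editor's sketch, not part of xfs_inode.c] The memmove/memset idiom
 * used by the remove paths, shown on a plain int array: delete
 * ext_diff entries at idx, slide the tail down, and zero the vacated
 * slots at the end.
 */
#include <stdio.h>
#include <string.h>

static void remove_range(int *arr, int nextents, int idx, int ext_diff)
{
	if (idx + ext_diff < nextents)
		memmove(&arr[idx], &arr[idx + ext_diff],
			(nextents - (idx + ext_diff)) * sizeof(int));
	memset(&arr[nextents - ext_diff], 0, ext_diff * sizeof(int));
}

int main(void)
{
	int a[8] = { 10, 11, 12, 13, 14, 15, 16, 17 };
	int i;

	remove_range(a, 8, 2, 3);	/* drop 12, 13 and 14 */
	for (i = 0; i < 8; i++)
		printf("%d ", a[i]);	/* prints: 10 11 15 16 17 0 0 0 */
	printf("\n");
	return 0;
}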
3892 /*
3893 * This is called when incore extents are being removed from the
3894 * indirection array and the extents being removed span multiple extent
3895 * buffers. The idx parameter contains the file extent index where we
3896 * want to begin removing extents, and the count parameter contains
3897 * how many extents need to be removed.
3899 * |-------| |-------|
3900 * | nex1 | | | nex1 - number of extents before idx
3901 * |-------| | count |
3902 * | | | | count - number of extents being removed at idx
3903 * | count | |-------|
3904 * | | | nex2 | nex2 - number of extents after idx + count
3905 * |-------| |-------|
3906 */
3907 void
3908 xfs_iext_remove_indirect(
3909 xfs_ifork_t *ifp, /* inode fork pointer */
3910 xfs_extnum_t idx, /* index to begin removing extents */
3911 int count) /* number of extents to remove */
3912 {
3913 xfs_ext_irec_t *erp; /* indirection array pointer */
3914 int erp_idx = 0; /* indirection array index */
3915 xfs_extnum_t ext_cnt; /* extents left to remove */
3916 xfs_extnum_t ext_diff; /* extents to remove in current list */
3917 xfs_extnum_t nex1; /* number of extents before idx */
3918 xfs_extnum_t nex2; /* extents after idx + count */
3919 int nlists; /* entries in indirection array */
3920 int page_idx = idx; /* index in target extent list */
3922 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3923 erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3924 ASSERT(erp != NULL);
3925 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3926 nex1 = page_idx;
3927 ext_cnt = count;
3928 while (ext_cnt) {
3929 nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
3930 ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
3931 /*
3932 * Check for deletion of entire list;
3933 * xfs_iext_irec_remove() updates extent offsets.
3934 */
3935 if (ext_diff == erp->er_extcount) {
3936 xfs_iext_irec_remove(ifp, erp_idx);
3937 ext_cnt -= ext_diff;
3938 nex1 = 0;
3939 if (ext_cnt) {
3940 ASSERT(erp_idx < ifp->if_real_bytes /
3941 XFS_IEXT_BUFSZ);
3942 erp = &ifp->if_u1.if_ext_irec[erp_idx];
3943 nex1 = 0;
3944 continue;
3945 } else {
3946 break;
3947 }
3948 }
3949 /* Move extents up (if needed) */
3950 if (nex2) {
3951 memmove(&erp->er_extbuf[nex1],
3952 &erp->er_extbuf[nex1 + ext_diff],
3953 nex2 * sizeof(xfs_bmbt_rec_t));
3954 }
3955 /* Zero out rest of page */
3956 memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
3957 ((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
3958 /* Update remaining counters */
3959 erp->er_extcount -= ext_diff;
3960 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
3961 ext_cnt -= ext_diff;
3962 nex1 = 0;
3963 erp_idx++;
3964 erp++;
3965 }
3966 ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
3967 xfs_iext_irec_compact(ifp);
3968 }
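/*
 * [Editor's sketch, not part of xfs_inode.c] A dry run of the per-page
 * bookkeeping above: deleting ext_cnt records that begin at offset
 * nex1 of page 0 and span several pages. Simplified -- a fully emptied
 * page is only reported here, while the real code frees it and
 * re-walks the same index.
 */
#include <stdio.h>

#define MAX(a, b)	((a) > (b) ? (a) : (b))
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	int extcount[3] = { 100, 256, 80 };	/* assumed records per page */
	int nex1 = 90;				/* records kept before idx */
	int ext_cnt = 300;			/* records to delete */
	int p;

	for (p = 0; p < 3 && ext_cnt; p++) {
		int ext_diff = MIN(ext_cnt, extcount[p] - nex1);
		int nex2 = MAX(extcount[p] - (nex1 + ext_cnt), 0);

		printf("page %d: remove %d, keep %d front + %d tail%s\n",
		       p, ext_diff, nex1, nex2,
		       ext_diff == extcount[p] ? " (page freed)" : "");
		ext_cnt -= ext_diff;
		nex1 = 0;
	}
	return 0;
}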
3970 /*
3971 * Create, destroy, or resize a linear (direct) block of extents.
3972 */
3973 void
3974 xfs_iext_realloc_direct(
3975 xfs_ifork_t *ifp, /* inode fork pointer */
3976 int new_size) /* new size of extents */
3977 {
3978 int rnew_size; /* real new size of extents */
3980 rnew_size = new_size;
3982 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
3983 ((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
3984 (new_size != ifp->if_real_bytes)));
3986 /* Free extent records */
3987 if (new_size == 0) {
3988 xfs_iext_destroy(ifp);
3989 }
3990 /* Resize direct extent list and zero any new bytes */
3991 else if (ifp->if_real_bytes) {
3992 /* Check if extents will fit inside the inode */
3993 if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
3994 xfs_iext_direct_to_inline(ifp, new_size /
3995 (uint)sizeof(xfs_bmbt_rec_t));
3996 ifp->if_bytes = new_size;
3997 return;
3998 }
3999 if (!is_power_of_2(new_size)) {
4000 rnew_size = roundup_pow_of_two(new_size);
4001 }
4002 if (rnew_size != ifp->if_real_bytes) {
4003 ifp->if_u1.if_extents =
4004 kmem_realloc(ifp->if_u1.if_extents,
4005 rnew_size,
4006 ifp->if_real_bytes, KM_NOFS);
4007 }
4008 if (rnew_size > ifp->if_real_bytes) {
4009 memset(&ifp->if_u1.if_extents[ifp->if_bytes /
4010 (uint)sizeof(xfs_bmbt_rec_t)], 0,
4011 rnew_size - ifp->if_real_bytes);
4012 }
4013 }
4014 /*
4015 * Switch from the inline extent buffer to a direct
4016 * extent list. Be sure to include the inline extent
4017 * bytes in new_size.
4018 */
4019 else {
4020 new_size += ifp->if_bytes;
4021 if (!is_power_of_2(new_size)) {
4022 rnew_size = roundup_pow_of_two(new_size);
4023 }
4024 xfs_iext_inline_to_direct(ifp, rnew_size);
4025 }
4026 ifp->if_real_bytes = rnew_size;
4027 ifp->if_bytes = new_size;
4028 }
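/*
 * [Editor's sketch, not part of xfs_inode.c] The power-of-two growth
 * policy above: if_real_bytes is the rounded allocation, if_bytes the
 * bytes actually used, so repeated small inserts don't realloc every
 * time. roundup_pow2() is a userspace stand-in for the kernel's
 * roundup_pow_of_two().
 */
#include <stdio.h>

static unsigned int roundup_pow2(unsigned int x)
{
	unsigned int r = 1;

	while (r < x)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int sizes[] = { 16, 48, 112, 4000 };
	int i;

	for (i = 0; i < 4; i++)
		printf("new_size %4u -> if_real_bytes %4u\n",
		       sizes[i], roundup_pow2(sizes[i]));
	return 0;
}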
4030 /*
4031 * Switch from linear (direct) extent records to inline buffer.
4032 */
4033 void
4034 xfs_iext_direct_to_inline(
4035 xfs_ifork_t *ifp, /* inode fork pointer */
4036 xfs_extnum_t nextents) /* number of extents in file */
4037 {
4038 ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4039 ASSERT(nextents <= XFS_INLINE_EXTS);
4040 /*
4041 * The inline buffer was zeroed when we switched
4042 * from inline to direct extent allocation mode,
4043 * so we don't need to clear it here.
4044 */
4045 memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
4046 nextents * sizeof(xfs_bmbt_rec_t));
4047 kmem_free(ifp->if_u1.if_extents);
4048 ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
4049 ifp->if_real_bytes = 0;
4050 }
4052 /*
4053 * Switch from inline buffer to linear (direct) extent records.
4054 * new_size should already be rounded up to the next power of 2
4055 * by the caller (when appropriate), so use new_size as it is.
4056 * However, since new_size may be rounded up, we can't update
4057 * if_bytes here. It is the caller's responsibility to update
4058 * if_bytes upon return.
4059 */
4060 void
4061 xfs_iext_inline_to_direct(
4062 xfs_ifork_t *ifp, /* inode fork pointer */
4063 int new_size) /* number of extents in file */
4064 {
4065 ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
4066 memset(ifp->if_u1.if_extents, 0, new_size);
4067 if (ifp->if_bytes) {
4068 memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
4069 ifp->if_bytes);
4070 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4071 sizeof(xfs_bmbt_rec_t));
4072 }
4073 ifp->if_real_bytes = new_size;
4074 }
4076 /*
4077 * Resize an extent indirection array to new_size bytes.
4078 */
4079 void
4080 xfs_iext_realloc_indirect(
4081 xfs_ifork_t *ifp, /* inode fork pointer */
4082 int new_size) /* new indirection array size */
4083 {
4084 int nlists; /* number of irec's (ex lists) */
4085 int size; /* current indirection array size */
4087 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4088 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4089 size = nlists * sizeof(xfs_ext_irec_t);
4090 ASSERT(ifp->if_real_bytes);
4091 ASSERT((new_size >= 0) && (new_size != size));
4092 if (new_size == 0) {
4093 xfs_iext_destroy(ifp);
4094 } else {
4095 ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
4096 kmem_realloc(ifp->if_u1.if_ext_irec,
4097 new_size, size, KM_NOFS);
4098 }
4099 }
4101 /*
4102 * Switch from indirection array to linear (direct) extent allocations.
4103 */
4104 void
4105 xfs_iext_indirect_to_direct(
4106 xfs_ifork_t *ifp) /* inode fork pointer */
4107 {
4108 xfs_bmbt_rec_host_t *ep; /* extent record pointer */
4109 xfs_extnum_t nextents; /* number of extents in file */
4110 int size; /* size of file extents */
4112 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4113 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4114 ASSERT(nextents <= XFS_LINEAR_EXTS);
4115 size = nextents * sizeof(xfs_bmbt_rec_t);
4117 xfs_iext_irec_compact_full(ifp);
4118 ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
4120 ep = ifp->if_u1.if_ext_irec->er_extbuf;
4121 kmem_free(ifp->if_u1.if_ext_irec);
4122 ifp->if_flags &= ~XFS_IFEXTIREC;
4123 ifp->if_u1.if_extents = ep;
4124 ifp->if_bytes = size;
4125 if (nextents < XFS_LINEAR_EXTS) {
4126 xfs_iext_realloc_direct(ifp, size);
4127 }
4128 }
4130 /*
4131 * Free incore file extents.
4132 */
4133 void
4134 xfs_iext_destroy(
4135 xfs_ifork_t *ifp) /* inode fork pointer */
4136 {
4137 if (ifp->if_flags & XFS_IFEXTIREC) {
4138 int erp_idx;
4139 int nlists;
4141 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4142 for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
4143 xfs_iext_irec_remove(ifp, erp_idx);
4144 }
4145 ifp->if_flags &= ~XFS_IFEXTIREC;
4146 } else if (ifp->if_real_bytes) {
4147 kmem_free(ifp->if_u1.if_extents);
4148 } else if (ifp->if_bytes) {
4149 memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4150 sizeof(xfs_bmbt_rec_t));
4151 }
4152 ifp->if_u1.if_extents = NULL;
4153 ifp->if_real_bytes = 0;
4154 ifp->if_bytes = 0;
4155 }
4157 /*
4158 * Return a pointer to the extent record for file system block bno.
4159 */
4160 xfs_bmbt_rec_host_t * /* pointer to found extent record */
4161 xfs_iext_bno_to_ext(
4162 xfs_ifork_t *ifp, /* inode fork pointer */
4163 xfs_fileoff_t bno, /* block number to search for */
4164 xfs_extnum_t *idxp) /* index of target extent */
4165 {
4166 xfs_bmbt_rec_host_t *base; /* pointer to first extent */
4167 xfs_filblks_t blockcount = 0; /* number of blocks in extent */
4168 xfs_bmbt_rec_host_t *ep = NULL; /* pointer to target extent */
4169 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
4170 int high; /* upper boundary in search */
4171 xfs_extnum_t idx = 0; /* index of target extent */
4172 int low; /* lower boundary in search */
4173 xfs_extnum_t nextents; /* number of file extents */
4174 xfs_fileoff_t startoff = 0; /* start offset of extent */
4176 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4177 if (nextents == 0) {
4178 *idxp = 0;
4179 return NULL;
4180 }
4181 low = 0;
4182 if (ifp->if_flags & XFS_IFEXTIREC) {
4183 /* Find target extent list */
4184 int erp_idx = 0;
4185 erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
4186 base = erp->er_extbuf;
4187 high = erp->er_extcount - 1;
4188 } else {
4189 base = ifp->if_u1.if_extents;
4190 high = nextents - 1;
4191 }
4192 /* Binary search extent records */
4193 while (low <= high) {
4194 idx = (low + high) >> 1;
4195 ep = base + idx;
4196 startoff = xfs_bmbt_get_startoff(ep);
4197 blockcount = xfs_bmbt_get_blockcount(ep);
4198 if (bno < startoff) {
4199 high = idx - 1;
4200 } else if (bno >= startoff + blockcount) {
4201 low = idx + 1;
4202 } else {
4203 /* Convert back to file-based extent index */
4204 if (ifp->if_flags & XFS_IFEXTIREC) {
4205 idx += erp->er_extoff;
4206 }
4207 *idxp = idx;
4208 return ep;
4209 }
4210 }
4211 /* Convert back to file-based extent index */
4212 if (ifp->if_flags & XFS_IFEXTIREC) {
4213 idx += erp->er_extoff;
4214 }
4215 if (bno >= startoff + blockcount) {
4216 if (++idx == nextents) {
4217 ep = NULL;
4218 } else {
4219 ep = xfs_iext_get_ext(ifp, idx);
4220 }
4221 }
4222 *idxp = idx;
4223 return ep;
4224 }
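/*
 * [Editor's sketch, not part of xfs_inode.c] The interval binary
 * search above, reduced to a toy extent table of (startoff,
 * blockcount) pairs sorted by startoff. Returns the index whose
 * half-open range contains bno, or -1 when bno falls in a hole.
 */
#include <stdio.h>

struct ext { unsigned long startoff, blockcount; };

static int bno_to_ext(const struct ext *base, int n, unsigned long bno)
{
	int low = 0, high = n - 1;

	while (low <= high) {
		int idx = (low + high) >> 1;

		if (bno < base[idx].startoff)
			high = idx - 1;
		else if (bno >= base[idx].startoff + base[idx].blockcount)
			low = idx + 1;
		else
			return idx;	/* bno inside this extent */
	}
	return -1;			/* bno lies in a hole */
}

int main(void)
{
	struct ext map[] = { { 0, 10 }, { 20, 5 }, { 100, 50 } };

	printf("%d %d %d\n", bno_to_ext(map, 3, 4),
	       bno_to_ext(map, 3, 12), bno_to_ext(map, 3, 120));
	/* prints: 0 -1 2 */
	return 0;
}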
4226 /*
4227 * Return a pointer to the indirection array entry containing the
4228 * extent record for filesystem block bno. Store the index of the
4229 * target irec in *erp_idxp.
4230 */
4231 xfs_ext_irec_t * /* pointer to found extent irec */
4232 xfs_iext_bno_to_irec(
4233 xfs_ifork_t *ifp, /* inode fork pointer */
4234 xfs_fileoff_t bno, /* block number to search for */
4235 int *erp_idxp) /* irec index of target ext list */
4236 {
4237 xfs_ext_irec_t *erp = NULL; /* indirection array pointer */
4238 xfs_ext_irec_t *erp_next; /* next indirection array entry */
4239 int erp_idx; /* indirection array index */
4240 int nlists; /* number of extent irec's (lists) */
4241 int high; /* binary search upper limit */
4242 int low; /* binary search lower limit */
4244 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4245 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4246 erp_idx = 0;
4247 low = 0;
4248 high = nlists - 1;
4249 while (low <= high) {
4250 erp_idx = (low + high) >> 1;
4251 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4252 erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
4253 if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
4254 high = erp_idx - 1;
4255 } else if (erp_next && bno >=
4256 xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
4257 low = erp_idx + 1;
4258 } else {
4259 break;
4260 }
4261 }
4262 *erp_idxp = erp_idx;
4263 return erp;
4264 }
4266 /*
4267 * Return a pointer to the indirection array entry containing the
4268 * extent record at file extent index *idxp. Store the index of the
4269 * target irec in *erp_idxp and store the page index of the target
4270 * extent record in *idxp.
4271 */
4272 xfs_ext_irec_t *
4273 xfs_iext_idx_to_irec(
4274 xfs_ifork_t *ifp, /* inode fork pointer */
4275 xfs_extnum_t *idxp, /* extent index (file -> page) */
4276 int *erp_idxp, /* pointer to target irec */
4277 int realloc) /* new bytes were just added */
4278 {
4279 xfs_ext_irec_t *prev; /* pointer to previous irec */
4280 xfs_ext_irec_t *erp = NULL; /* pointer to current irec */
4281 int erp_idx; /* indirection array index */
4282 int nlists; /* number of irec's (ex lists) */
4283 int high; /* binary search upper limit */
4284 int low; /* binary search lower limit */
4285 xfs_extnum_t page_idx = *idxp; /* extent index in target list */
4287 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4288 ASSERT(page_idx >= 0 && page_idx <=
4289 ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
4290 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4291 erp_idx = 0;
4292 low = 0;
4293 high = nlists - 1;
4295 /* Binary search extent irec's */
4296 while (low <= high) {
4297 erp_idx = (low + high) >> 1;
4298 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4299 prev = erp_idx > 0 ? erp - 1 : NULL;
4300 if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
4301 realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
4302 high = erp_idx - 1;
4303 } else if (page_idx > erp->er_extoff + erp->er_extcount ||
4304 (page_idx == erp->er_extoff + erp->er_extcount &&
4305 !realloc)) {
4306 low = erp_idx + 1;
4307 } else if (page_idx == erp->er_extoff + erp->er_extcount &&
4308 erp->er_extcount == XFS_LINEAR_EXTS) {
4309 ASSERT(realloc);
4310 page_idx = 0;
4311 erp_idx++;
4312 erp = erp_idx < nlists ? erp + 1 : NULL;
4313 break;
4314 } else {
4315 page_idx -= erp->er_extoff;
4316 break;
4317 }
4318 }
4319 *idxp = page_idx;
4320 *erp_idxp = erp_idx;
4321 return(erp);
4322 }
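/*
 * [Editor's sketch, not part of xfs_inode.c] What the er_extoff fields
 * buy us: translating a file-wide extent index into (page, offset in
 * page). A linear scan is used here for brevity where the function
 * above binary-searches.
 */
#include <stdio.h>

struct irec { int extoff, extcount; };	/* assumed mini irec */

static int idx_to_irec(const struct irec *ir, int nlists, int *idxp)
{
	int p;

	for (p = nlists - 1; p >= 0; p--) {
		if (*idxp >= ir[p].extoff) {
			*idxp -= ir[p].extoff;	/* file index -> page index */
			return p;
		}
	}
	return 0;
}

int main(void)
{
	struct irec ir[] = { { 0, 256 }, { 256, 256 }, { 512, 100 } };
	int idx = 300;
	int page = idx_to_irec(ir, 3, &idx);

	printf("page %d, offset %d\n", page, idx);	/* page 1, offset 44 */
	return 0;
}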
4324 /*
4325 * Allocate and initialize an indirection array once the space needed
4326 * for incore extents increases above XFS_IEXT_BUFSZ.
4327 */
4328 void
4329 xfs_iext_irec_init(
4330 xfs_ifork_t *ifp) /* inode fork pointer */
4331 {
4332 xfs_ext_irec_t *erp; /* indirection array pointer */
4333 xfs_extnum_t nextents; /* number of extents in file */
4335 ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
4336 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4337 ASSERT(nextents <= XFS_LINEAR_EXTS);
4339 erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);
4341 if (nextents == 0) {
4342 ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
4343 } else if (!ifp->if_real_bytes) {
4344 xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
4345 } else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
4346 xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
4347 }
4348 erp->er_extbuf = ifp->if_u1.if_extents;
4349 erp->er_extcount = nextents;
4350 erp->er_extoff = 0;
4352 ifp->if_flags |= XFS_IFEXTIREC;
4353 ifp->if_real_bytes = XFS_IEXT_BUFSZ;
4354 ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
4355 ifp->if_u1.if_ext_irec = erp;
4357 return;
4358 }
4360 /*
4361 * Allocate and initialize a new entry in the indirection array.
4362 */
4363 xfs_ext_irec_t *
4364 xfs_iext_irec_new(
4365 xfs_ifork_t *ifp, /* inode fork pointer */
4366 int erp_idx) /* index for new irec */
4367 {
4368 xfs_ext_irec_t *erp; /* indirection array pointer */
4369 int i; /* loop counter */
4370 int nlists; /* number of irec's (ex lists) */
4372 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4373 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4375 /* Resize indirection array */
4376 xfs_iext_realloc_indirect(ifp, ++nlists *
4377 sizeof(xfs_ext_irec_t));
4378 /*
4379 * Move records down in the array so the
4380 * new page can use erp_idx.
4381 */
4382 erp = ifp->if_u1.if_ext_irec;
4383 for (i = nlists - 1; i > erp_idx; i--) {
4384 memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
4385 }
4386 ASSERT(i == erp_idx);
4388 /* Initialize new extent record */
4389 erp = ifp->if_u1.if_ext_irec;
4390 erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
4391 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
4392 memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
4393 erp[erp_idx].er_extcount = 0;
4394 erp[erp_idx].er_extoff = erp_idx > 0 ?
4395 erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
4396 return (&erp[erp_idx]);
4397 }
4399 /*
4400 * Remove a record from the indirection array.
4401 */
4402 void
4403 xfs_iext_irec_remove(
4404 xfs_ifork_t *ifp, /* inode fork pointer */
4405 int erp_idx) /* irec index to remove */
4406 {
4407 xfs_ext_irec_t *erp; /* indirection array pointer */
4408 int i; /* loop counter */
4409 int nlists; /* number of irec's (ex lists) */
4411 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4412 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4413 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4414 if (erp->er_extbuf) {
4415 xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
4416 -erp->er_extcount);
4417 kmem_free(erp->er_extbuf);
4418 }
4419 /* Compact extent records */
4420 erp = ifp->if_u1.if_ext_irec;
4421 for (i = erp_idx; i < nlists - 1; i++) {
4422 memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
4423 }
4424 /*
4425 * Manually free the last extent record from the indirection
4426 * array. A call to xfs_iext_realloc_indirect() with a size
4427 * of zero would result in a call to xfs_iext_destroy() which
4428 * would in turn call this function again, creating a nasty
4429 * infinite loop.
4430 */
4431 if (--nlists) {
4432 xfs_iext_realloc_indirect(ifp,
4433 nlists * sizeof(xfs_ext_irec_t));
4434 } else {
4435 kmem_free(ifp->if_u1.if_ext_irec);
4436 }
4437 ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
4438 }
4440 /*
4441 * This is called to clean up large amounts of unused memory allocated
4442 * by the indirection array. Before compacting anything though, verify
4443 * that the indirection array is still needed and switch back to the
4444 * linear extent list (or even the inline buffer) if possible. The
4445 * compaction policy is as follows:
4447 * Full Compaction: Extents fit into a single page (or inline buffer)
4448 * Full Compaction: Extents occupy less than 12.5% of allocated space
4449 * Partial Compaction: Extents occupy >= 12.5% and < 50% of allocated space
4450 * No Compaction: Extents occupy at least 50% of allocated space
4451 */
4452 void
4453 xfs_iext_irec_compact(
4454 xfs_ifork_t *ifp) /* inode fork pointer */
4455 {
4456 xfs_extnum_t nextents; /* number of extents in file */
4457 int nlists; /* number of irec's (ex lists) */
4459 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4460 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4461 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4463 if (nextents == 0) {
4464 xfs_iext_destroy(ifp);
4465 } else if (nextents <= XFS_INLINE_EXTS) {
4466 xfs_iext_indirect_to_direct(ifp);
4467 xfs_iext_direct_to_inline(ifp, nextents);
4468 } else if (nextents <= XFS_LINEAR_EXTS) {
4469 xfs_iext_indirect_to_direct(ifp);
4470 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) {
4471 xfs_iext_irec_compact_full(ifp);
4472 } else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
4473 xfs_iext_irec_compact_pages(ifp);
4474 }
4475 }
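/*
 * [Editor's sketch, not part of xfs_inode.c] The compaction policy as
 * plain arithmetic. Note the >>3 and >>1 shifts are <12.5% and <50%
 * occupancy tests; LINEAR_EXTS is an assumed per-page capacity.
 */
#include <stdio.h>

#define LINEAR_EXTS	256

static const char *compact_choice(int nextents, int nlists)
{
	if (nextents == 0)
		return "destroy";
	if (nextents <= 2)			/* assumed XFS_INLINE_EXTS */
		return "indirect -> direct -> inline";
	if (nextents <= LINEAR_EXTS)
		return "indirect -> direct";
	if (nextents < (nlists * LINEAR_EXTS) >> 3)
		return "full compaction";
	if (nextents < (nlists * LINEAR_EXTS) >> 1)
		return "page compaction";
	return "no compaction";
}

int main(void)
{
	printf("%s\n", compact_choice(300, 16));	/* 300 < 512: full */
	printf("%s\n", compact_choice(900, 16));	/* 900 < 2048: pages */
	printf("%s\n", compact_choice(3000, 16));	/* >= 2048: none */
	return 0;
}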
4477 /*
4478 * Combine extents from neighboring extent pages.
4479 */
4480 void
4481 xfs_iext_irec_compact_pages(
4482 xfs_ifork_t *ifp) /* inode fork pointer */
4483 {
4484 xfs_ext_irec_t *erp, *erp_next;/* pointers to irec entries */
4485 int erp_idx = 0; /* indirection array index */
4486 int nlists; /* number of irec's (ex lists) */
4488 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4489 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4490 while (erp_idx < nlists - 1) {
4491 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4492 erp_next = erp + 1;
4493 if (erp_next->er_extcount <=
4494 (XFS_LINEAR_EXTS - erp->er_extcount)) {
4495 memmove(&erp->er_extbuf[erp->er_extcount],
4496 erp_next->er_extbuf, erp_next->er_extcount *
4497 sizeof(xfs_bmbt_rec_t));
4498 erp->er_extcount += erp_next->er_extcount;
4499 /*
4500 * Free page before removing extent record
4501 * so er_extoffs don't get modified in
4502 * xfs_iext_irec_remove.
4503 */
4504 kmem_free(erp_next->er_extbuf);
4505 erp_next->er_extbuf = NULL;
4506 xfs_iext_irec_remove(ifp, erp_idx + 1);
4507 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4508 } else {
4509 erp_idx++;
4510 }
4511 }
4512 }
4514 /*
4515 * Fully compact the extent records managed by the indirection array.
4516 */
4517 void
4518 xfs_iext_irec_compact_full(
4519 xfs_ifork_t *ifp) /* inode fork pointer */
4520 {
4521 xfs_bmbt_rec_host_t *ep, *ep_next; /* extent record pointers */
4522 xfs_ext_irec_t *erp, *erp_next; /* extent irec pointers */
4523 int erp_idx = 0; /* extent irec index */
4524 int ext_avail; /* empty entries in ex list */
4525 int ext_diff; /* number of exts to add */
4526 int nlists; /* number of irec's (ex lists) */
4528 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4530 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4531 erp = ifp->if_u1.if_ext_irec;
4532 ep = &erp->er_extbuf[erp->er_extcount];
4533 erp_next = erp + 1;
4534 ep_next = erp_next->er_extbuf;
4536 while (erp_idx < nlists - 1) {
4537 /*
4538 * Check how many extent records are available in this irec.
4539 * If there are none, skip the whole exercise.
4540 */
4541 ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
4542 if (ext_avail) {
4544 /*
4545 * Copy over as many extent records as possible into
4546 * the previous page.
4547 */
4548 ext_diff = MIN(ext_avail, erp_next->er_extcount);
4549 memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t));
4550 erp->er_extcount += ext_diff;
4551 erp_next->er_extcount -= ext_diff;
4553 /*
4554 * If the next irec is empty now we can simply
4555 * remove it.
4556 */
4557 if (erp_next->er_extcount == 0) {
4558 /*
4559 * Free page before removing extent record
4560 * so er_extoffs don't get modified in
4561 * xfs_iext_irec_remove.
4562 */
4563 kmem_free(erp_next->er_extbuf);
4564 erp_next->er_extbuf = NULL;
4565 xfs_iext_irec_remove(ifp, erp_idx + 1);
4566 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4567 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4569 /*
4570 * If the next irec is not empty, move up the content
4571 * that has not been copied to the previous page to
4572 * the beginning of this one.
4573 */
4574 } else {
4575 memmove(erp_next->er_extbuf, &ep_next[ext_diff],
4576 erp_next->er_extcount *
4577 sizeof(xfs_bmbt_rec_t));
4578 ep_next = erp_next->er_extbuf;
4579 memset(&ep_next[erp_next->er_extcount], 0,
4580 (XFS_LINEAR_EXTS -
4581 erp_next->er_extcount) *
4582 sizeof(xfs_bmbt_rec_t));
4583 }
4584 }
4586 if (erp->er_extcount == XFS_LINEAR_EXTS) {
4587 erp_idx++;
4588 if (erp_idx < nlists)
4589 erp = &ifp->if_u1.if_ext_irec[erp_idx];
4590 else
4591 break;
4592 }
4593 ep = &erp->er_extbuf[erp->er_extcount];
4594 erp_next = erp + 1;
4595 ep_next = erp_next->er_extbuf;
4596 }
4597 }
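/*
 * [Editor's sketch, not part of xfs_inode.c] Full compaction on toy
 * pages: slide records left so that every page before the last
 * occupied one is full. The real code would then free the emptied
 * trailing page via xfs_iext_irec_remove(); here we only print the
 * resulting counts.
 */
#include <stdio.h>

#define PAGE_EXTS	8	/* assumed tiny per-page capacity */
#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	int count[3] = { 5, 3, 6 };	/* records per page */
	int p;

	for (p = 0; p + 1 < 3; p++) {
		int avail = PAGE_EXTS - count[p];
		int moved = MIN(avail, count[p + 1]);

		count[p] += moved;
		count[p + 1] -= moved;
	}
	printf("%d %d %d\n", count[0], count[1], count[2]); /* prints: 8 6 0 */
	return 0;
}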
4599 /*
4600 * This is called to update the er_extoff field in the indirection
4601 * array when extents have been added or removed from one of the
4602 * extent lists. erp_idx contains the irec index to begin updating
4603 * at and ext_diff contains the number of extents that were added
4604 * or removed.
4605 */
4606 void
4607 xfs_iext_irec_update_extoffs(
4608 xfs_ifork_t *ifp, /* inode fork pointer */
4609 int erp_idx, /* irec index to update */
4610 int ext_diff) /* number of new extents */
4611 {
4612 int i; /* loop counter */
4613 int nlists; /* number of irec's (ex lists) */
4615 ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4616 nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4617 for (i = erp_idx; i < nlists; i++) {
4618 ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
4619 }
4620 }
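/*
 * [Editor's sketch, not part of xfs_inode.c] er_extoff is a running
 * prefix sum of the extents held by all earlier pages; adding or
 * removing ext_diff records in one page shifts every later offset by
 * the same amount, exactly as the loop above does.
 */
#include <stdio.h>

static void update_extoffs(int *extoff, int nlists, int erp_idx, int ext_diff)
{
	int i;

	for (i = erp_idx; i < nlists; i++)
		extoff[i] += ext_diff;
}

int main(void)
{
	int extoff[4] = { 0, 256, 512, 768 };	/* assumed page offsets */
	int i;

	update_extoffs(extoff, 4, 2, -10);	/* 10 extents left page 1 */
	for (i = 0; i < 4; i++)
		printf("%d ", extoff[i]);	/* prints: 0 256 502 758 */
	printf("\n");
	return 0;
}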