/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/log2.h>

#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_btree.h"
#include "xfs_btree_trace.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_error.h"
#include "xfs_utils.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_trace.h"
kmem_zone_t *xfs_ifork_zone;
kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2
STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
/*
 * Make sure that the extents in the given memory buffer
 * are valid.
 */
	xfs_bmbt_rec_host_t	rec;

	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		rec.l0 = get_unaligned(&ep->l0);
		rec.l1 = get_unaligned(&ep->l1);
		xfs_bmbt_get_all(&rec, &irec);
		if (fmt == XFS_EXTFMT_NOSTATE)
			ASSERT(irec.br_state == XFS_EXT_NORM);
	}

#define xfs_validate_extents(ifp, nrecs, fmt)
/*
 * Check that none of the inodes in the buffer have a next
 * unlinked field of 0.
 */
	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;

	for (i = 0; i < j; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					i * mp->m_sb.sb_inodesize);
		if (!dip->di_next_unlinked)  {
	"Detected bogus zero next_unlinked field in incore inode buffer 0x%p.",
			ASSERT(dip->di_next_unlinked);
/*
 * Find the buffer associated with the given inode map
 * We do basic validation checks on the buffer once it has been
 * retrieved from disk.
 */
	struct xfs_imap	*imap,

	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
				   (int)imap->im_len, buf_flags, &bp);
		if (error != EAGAIN) {
			"%s: xfs_trans_read_buf() returned error %d.",
			ASSERT(buf_flags & XBF_TRYLOCK);

	/*
	 * Validate the magic number and version of every inode in the buffer
	 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
	 */
	ni = BBTOB(imap->im_len) >> mp->m_sb.sb_inodelog;
#else	/* usual case */
	for (i = 0; i < ni; i++) {
		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
					(i << mp->m_sb.sb_inodelog));
		di_ok = dip->di_magic == cpu_to_be16(XFS_DINODE_MAGIC) &&
			    XFS_DINODE_GOOD_VERSION(dip->di_version);
		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
						XFS_ERRTAG_ITOBP_INOTOBP,
						XFS_RANDOM_ITOBP_INOTOBP))) {
			if (iget_flags & XFS_IGET_UNTRUSTED) {
				xfs_trans_brelse(tp, bp);
				return XFS_ERROR(EINVAL);
			XFS_CORRUPTION_ERROR("xfs_imap_to_bp",
						XFS_ERRLEVEL_HIGH, mp, dip);
			"bad inode magic/vsn daddr %lld #%d (magic=%x)",
				(unsigned long long)imap->im_blkno, i,
				be16_to_cpu(dip->di_magic));
			xfs_trans_brelse(tp, bp);
			return XFS_ERROR(EFSCORRUPTED);

	xfs_inobp_check(mp, bp);

	/*
	 * Mark the buffer as an inode buffer now that it looks good
	 */
	XFS_BUF_SET_VTYPE(bp, B_FS_INO);
/*
 * This routine is called to map an inode number within a file
 * system to the buffer containing the on-disk version of the
 * inode.  It returns a pointer to the buffer containing the
 * on-disk inode in the bpp parameter, and in the dip parameter
 * it returns a pointer to the on-disk inode within that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * Use xfs_imap() to determine the size and location of the
 * buffer to read from disk.
 */
	struct xfs_imap	imap;

	error = xfs_imap(mp, tp, ino, &imap, imap_flags);

	error = xfs_imap_to_bp(mp, tp, &imap, &bp, XBF_LOCK, imap_flags);

	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
	*offset = imap.im_boffset;
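
/*
 * Illustrative caller sketch (not part of the original source): reading the
 * on-disk inode for an arbitrary inode number with the routine above.  Local
 * variable names are hypothetical and error handling is elided.
 */
#if 0
	xfs_dinode_t	*dip;
	xfs_buf_t	*bp;
	int		offset;

	error = xfs_inotobp(mp, tp, ino, &dip, &bp, &offset, 0);
	if (!error) {
		/* ... inspect dip, e.g. dip->di_next_unlinked ... */
		xfs_trans_brelse(tp, bp);	/* release the inode buffer */
	}
#endif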
/*
 * This routine is called to map an inode to the buffer containing
 * the on-disk version of the inode.  It returns a pointer to the
 * buffer containing the on-disk inode in the bpp parameter, and in
 * the dip parameter it returns a pointer to the on-disk inode within
 * that buffer.
 *
 * If a non-zero error is returned, then the contents of bpp and
 * dipp are undefined.
 *
 * The inode is expected to have already been mapped to its buffer and read
 * in once, thus we can use the mapping information stored in the inode
 * rather than calling xfs_imap().  This allows us to avoid the overhead
 * of looking at the inode btree for small block file systems.
 */
	ASSERT(ip->i_imap.im_blkno != 0);

	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp, buf_flags, 0);

	ASSERT(buf_flags & XBF_TRYLOCK);

	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);
/*
 * Move inode type and inode format specific information from the
 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
 * this means set if_rdev to the proper value.  For files, directories,
 * and symlinks this means to bring in the in-line data or extent
 * pointers.  For a file in B-tree format, only the root is immediately
 * brought in-core.  The rest will be in-lined in if_extents when it
 * is first referenced (see xfs_iread_extents()).
 */
	xfs_attr_shortform_t	*atp;

	ip->i_df.if_ext_max =
		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);

	if (unlikely(be32_to_cpu(dip->di_nextents) +
		     be16_to_cpu(dip->di_anextents) >
		     be64_to_cpu(dip->di_nblocks))) {
		xfs_warn(ip->i_mount,
			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
			(unsigned long long)ip->i_ino,
			(int)(be32_to_cpu(dip->di_nextents) +
			      be16_to_cpu(dip->di_anextents)),
			be64_to_cpu(dip->di_nblocks));
		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
		return XFS_ERROR(EFSCORRUPTED);

	if (unlikely(dip->di_forkoff > ip->i_mount->m_sb.sb_inodesize)) {
		xfs_warn(ip->i_mount, "corrupt dinode %Lu, forkoff = 0x%x.",
			(unsigned long long)ip->i_ino,
		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
		return XFS_ERROR(EFSCORRUPTED);

	if (unlikely((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) &&
		     !ip->i_mount->m_rtdev_targp)) {
		xfs_warn(ip->i_mount,
			"corrupt dinode %Lu, has realtime flag set.",
		XFS_CORRUPTION_ERROR("xfs_iformat(realtime)",
				     XFS_ERRLEVEL_LOW, ip->i_mount, dip);
		return XFS_ERROR(EFSCORRUPTED);

	switch (ip->i_d.di_mode & S_IFMT) {
		if (unlikely(dip->di_format != XFS_DINODE_FMT_DEV)) {
			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
			return XFS_ERROR(EFSCORRUPTED);
		ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);

		switch (dip->di_format) {
		case XFS_DINODE_FMT_LOCAL:
			/*
			 * no local regular files yet
			 */
			if (unlikely((be16_to_cpu(dip->di_mode) & S_IFMT) == S_IFREG)) {
				xfs_warn(ip->i_mount,
			"corrupt inode %Lu (local format for regular file).",
					(unsigned long long) ip->i_ino);
				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
				return XFS_ERROR(EFSCORRUPTED);

			di_size = be64_to_cpu(dip->di_size);
			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
				xfs_warn(ip->i_mount,
			"corrupt inode %Lu (bad size %Ld for local inode).",
					(unsigned long long) ip->i_ino,
					(long long) di_size);
				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
				return XFS_ERROR(EFSCORRUPTED);

			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
		case XFS_DINODE_FMT_EXTENTS:
			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
		case XFS_DINODE_FMT_BTREE:
			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
			return XFS_ERROR(EFSCORRUPTED);

		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
		return XFS_ERROR(EFSCORRUPTED);

	if (!XFS_DFORK_Q(dip))
	ASSERT(ip->i_afp == NULL);
	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
	ip->i_afp->if_ext_max =
		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	switch (dip->di_aformat) {
	case XFS_DINODE_FMT_LOCAL:
		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
		size = be16_to_cpu(atp->hdr.totsize);

		if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
			xfs_warn(ip->i_mount,
				"corrupt inode %Lu (bad attr fork size %Ld).",
				(unsigned long long) ip->i_ino,
			XFS_CORRUPTION_ERROR("xfs_iformat(8)",
			return XFS_ERROR(EFSCORRUPTED);

		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
	case XFS_DINODE_FMT_EXTENTS:
		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
	case XFS_DINODE_FMT_BTREE:
		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
		error = XFS_ERROR(EFSCORRUPTED);

		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
/*
 * The file is in-lined in the on-disk inode.
 * If it fits into if_inline_data, then copy
 * it there, otherwise allocate a buffer for it
 * and copy the data there.  Either way, set
 * if_data to point at the data.
 * If we allocate a buffer for the data, make
 * sure that its size is a multiple of 4 and
 * record the real size in i_real_bytes.
 */
	/*
	 * If the size is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_warn(ip->i_mount,
	"corrupt inode %Lu (bad size %d for local fork, size = %d).",
			(unsigned long long) ip->i_ino, size,
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
		return XFS_ERROR(EFSCORRUPTED);

	ifp = XFS_IFORK_PTR(ip, whichfork);
		ifp->if_u1.if_data = NULL;
	else if (size <= sizeof(ifp->if_u2.if_inline_data))
		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
		real_size = roundup(size, 4);
		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP | KM_NOFS);
	ifp->if_bytes = size;
	ifp->if_real_bytes = real_size;
		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFINLINE;
/*
 * The file consists of a set of extents all
 * of which fit into the on-disk inode.
 * If there are few enough extents to fit into
 * the if_inline_ext, then copy them there.
 * Otherwise allocate a buffer for them and copy
 * them into it.  Either way, set if_extents
 * to point at the extents.
 */
	ifp = XFS_IFORK_PTR(ip, whichfork);
	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
	size = nex * (uint)sizeof(xfs_bmbt_rec_t);

	/*
	 * If the number of extents is unreasonable, then something
	 * is wrong and we just bail out rather than crash in
	 * kmem_alloc() or memcpy() below.
	 */
	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
		xfs_warn(ip->i_mount, "corrupt inode %Lu ((a)extents = %d).",
			(unsigned long long) ip->i_ino, nex);
		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
		return XFS_ERROR(EFSCORRUPTED);

	ifp->if_real_bytes = 0;
		ifp->if_u1.if_extents = NULL;
	else if (nex <= XFS_INLINE_EXTS)
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
		xfs_iext_add(ifp, 0, nex);
	ifp->if_bytes = size;
		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
		xfs_validate_extents(ifp, nex, XFS_EXTFMT_INODE(ip));
		for (i = 0; i < nex; i++, dp++) {
			xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
			ep->l0 = get_unaligned_be64(&dp->l0);
			ep->l1 = get_unaligned_be64(&dp->l1);
	XFS_BMAP_TRACE_EXLIST(ip, nex, whichfork);
	if (whichfork != XFS_DATA_FORK ||
	    XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
		if (unlikely(xfs_check_nostate_extents(
			XFS_ERROR_REPORT("xfs_iformat_extents(2)",
			return XFS_ERROR(EFSCORRUPTED);
	ifp->if_flags |= XFS_IFEXTENTS;
/*
 * The file has too many extents to fit into
 * the inode, so they are in B-tree format.
 * Allocate a buffer for the root of the B-tree
 * and copy the root into it.  The i_extents
 * field will remain NULL until all of the
 * extents are read in (when they are needed).
 */
	xfs_bmdr_block_t	*dfp;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
	size = XFS_BMAP_BROOT_SPACE(dfp);
	nrecs = be16_to_cpu(dfp->bb_numrecs);

	/*
	 * blow out if -- fork has fewer extents than can fit in
	 * fork (fork shouldn't be a btree format), root btree
	 * block has more records than can fit into the fork,
	 * or the number of extents is greater than the number of
	 * blocks.
	 */
	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
	    || XFS_BMDR_SPACE_CALC(nrecs) >
			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
	    || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
		xfs_warn(ip->i_mount, "corrupt inode %Lu (btree).",
			(unsigned long long) ip->i_ino);
		XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
		return XFS_ERROR(EFSCORRUPTED);

	ifp->if_broot_bytes = size;
	ifp->if_broot = kmem_alloc(size, KM_SLEEP | KM_NOFS);
	ASSERT(ifp->if_broot != NULL);
	/*
	 * Copy and convert from the on-disk structure
	 * to the in-memory structure.
	 */
	xfs_bmdr_to_bmbt(ip->i_mount, dfp,
			 XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
			 ifp->if_broot, size);
	ifp->if_flags &= ~XFS_IFEXTENTS;
	ifp->if_flags |= XFS_IFBROOT;
xfs_dinode_from_disk(
	to->di_magic = be16_to_cpu(from->di_magic);
	to->di_mode = be16_to_cpu(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = be16_to_cpu(from->di_onlink);
	to->di_uid = be32_to_cpu(from->di_uid);
	to->di_gid = be32_to_cpu(from->di_gid);
	to->di_nlink = be32_to_cpu(from->di_nlink);
	to->di_projid_lo = be16_to_cpu(from->di_projid_lo);
	to->di_projid_hi = be16_to_cpu(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = be16_to_cpu(from->di_flushiter);
	to->di_atime.t_sec = be32_to_cpu(from->di_atime.t_sec);
	to->di_atime.t_nsec = be32_to_cpu(from->di_atime.t_nsec);
	to->di_mtime.t_sec = be32_to_cpu(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = be32_to_cpu(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = be32_to_cpu(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = be32_to_cpu(from->di_ctime.t_nsec);
	to->di_size = be64_to_cpu(from->di_size);
	to->di_nblocks = be64_to_cpu(from->di_nblocks);
	to->di_extsize = be32_to_cpu(from->di_extsize);
	to->di_nextents = be32_to_cpu(from->di_nextents);
	to->di_anextents = be16_to_cpu(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = be32_to_cpu(from->di_dmevmask);
	to->di_dmstate = be16_to_cpu(from->di_dmstate);
	to->di_flags = be16_to_cpu(from->di_flags);
	to->di_gen = be32_to_cpu(from->di_gen);
	xfs_icdinode_t	*from)

	to->di_magic = cpu_to_be16(from->di_magic);
	to->di_mode = cpu_to_be16(from->di_mode);
	to->di_version = from->di_version;
	to->di_format = from->di_format;
	to->di_onlink = cpu_to_be16(from->di_onlink);
	to->di_uid = cpu_to_be32(from->di_uid);
	to->di_gid = cpu_to_be32(from->di_gid);
	to->di_nlink = cpu_to_be32(from->di_nlink);
	to->di_projid_lo = cpu_to_be16(from->di_projid_lo);
	to->di_projid_hi = cpu_to_be16(from->di_projid_hi);
	memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
	to->di_flushiter = cpu_to_be16(from->di_flushiter);
	to->di_atime.t_sec = cpu_to_be32(from->di_atime.t_sec);
	to->di_atime.t_nsec = cpu_to_be32(from->di_atime.t_nsec);
	to->di_mtime.t_sec = cpu_to_be32(from->di_mtime.t_sec);
	to->di_mtime.t_nsec = cpu_to_be32(from->di_mtime.t_nsec);
	to->di_ctime.t_sec = cpu_to_be32(from->di_ctime.t_sec);
	to->di_ctime.t_nsec = cpu_to_be32(from->di_ctime.t_nsec);
	to->di_size = cpu_to_be64(from->di_size);
	to->di_nblocks = cpu_to_be64(from->di_nblocks);
	to->di_extsize = cpu_to_be32(from->di_extsize);
	to->di_nextents = cpu_to_be32(from->di_nextents);
	to->di_anextents = cpu_to_be16(from->di_anextents);
	to->di_forkoff = from->di_forkoff;
	to->di_aformat = from->di_aformat;
	to->di_dmevmask = cpu_to_be32(from->di_dmevmask);
	to->di_dmstate = cpu_to_be16(from->di_dmstate);
	to->di_flags = cpu_to_be16(from->di_flags);
	to->di_gen = cpu_to_be32(from->di_gen);
	if (di_flags & XFS_DIFLAG_ANY) {
		if (di_flags & XFS_DIFLAG_REALTIME)
			flags |= XFS_XFLAG_REALTIME;
		if (di_flags & XFS_DIFLAG_PREALLOC)
			flags |= XFS_XFLAG_PREALLOC;
		if (di_flags & XFS_DIFLAG_IMMUTABLE)
			flags |= XFS_XFLAG_IMMUTABLE;
		if (di_flags & XFS_DIFLAG_APPEND)
			flags |= XFS_XFLAG_APPEND;
		if (di_flags & XFS_DIFLAG_SYNC)
			flags |= XFS_XFLAG_SYNC;
		if (di_flags & XFS_DIFLAG_NOATIME)
			flags |= XFS_XFLAG_NOATIME;
		if (di_flags & XFS_DIFLAG_NODUMP)
			flags |= XFS_XFLAG_NODUMP;
		if (di_flags & XFS_DIFLAG_RTINHERIT)
			flags |= XFS_XFLAG_RTINHERIT;
		if (di_flags & XFS_DIFLAG_PROJINHERIT)
			flags |= XFS_XFLAG_PROJINHERIT;
		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
			flags |= XFS_XFLAG_NOSYMLINKS;
		if (di_flags & XFS_DIFLAG_EXTSIZE)
			flags |= XFS_XFLAG_EXTSIZE;
		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= XFS_XFLAG_EXTSZINHERIT;
		if (di_flags & XFS_DIFLAG_NODEFRAG)
			flags |= XFS_XFLAG_NODEFRAG;
		if (di_flags & XFS_DIFLAG_FILESTREAM)
			flags |= XFS_XFLAG_FILESTREAM;

	xfs_icdinode_t		*dic = &ip->i_d;

	return _xfs_dic2xflags(dic->di_flags) |
				(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);

	return _xfs_dic2xflags(be16_to_cpu(dip->di_flags)) |
				(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
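
/*
 * Illustrative usage sketch (not part of the original source): how a caller
 * might consume the combined flag mask built by the helpers above.  The
 * local variable name is hypothetical.
 */
#if 0
	uint	xflags = xfs_ip2xflags(ip);	/* assumes a locked in-core inode */

	if (xflags & XFS_XFLAG_REALTIME)
		/* file data is allocated on the realtime device */;
	if (xflags & XFS_XFLAG_HASATTR)
		/* the inode carries an attribute fork */;
#endif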
/*
 * Read the disk inode attributes into the in-core inode structure.
 */
	/*
	 * Fill in the location information in the in-core inode.
	 */
	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, iget_flags);

	/*
	 * Get pointers to the on-disk inode and the buffer containing it.
	 */
	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp,
			       XBF_LOCK, iget_flags);
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);

	/*
	 * If we got something that isn't an inode it means someone
	 * (nfs or dmi) has a stale handle.
	 */
	if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC)) {
			"%s: dip->di_magic (0x%x) != XFS_DINODE_MAGIC (0x%x)",
			__func__, be16_to_cpu(dip->di_magic), XFS_DINODE_MAGIC);
		error = XFS_ERROR(EINVAL);

	/*
	 * If the on-disk inode is already linked to a directory
	 * entry, copy all of the inode into the in-core inode.
	 * xfs_iformat() handles copying in the inode format
	 * specific information.
	 * Otherwise, just get the truly permanent information.
	 */
		xfs_dinode_from_disk(&ip->i_d, dip);
		error = xfs_iformat(ip, dip);
			xfs_alert(mp, "%s: xfs_iformat() returned error %d",
		ip->i_d.di_magic = be16_to_cpu(dip->di_magic);
		ip->i_d.di_version = dip->di_version;
		ip->i_d.di_gen = be32_to_cpu(dip->di_gen);
		ip->i_d.di_flushiter = be16_to_cpu(dip->di_flushiter);
		/*
		 * Make sure to pull in the mode here as well in
		 * case the inode is released without being used.
		 * This ensures that xfs_inactive() will see that
		 * the inode is already free and not try to mess
		 * with the uninitialized part of it.
		 */
		/*
		 * Initialize the per-fork minima and maxima for a new
		 * inode here.  xfs_iformat will do it for old inodes.
		 */
		ip->i_df.if_ext_max =
			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);

	/*
	 * The inode format changed when we moved the link count and
	 * made it 32 bits long.  If this is an old format inode,
	 * convert it in memory to look like a new one.  If it gets
	 * flushed to disk we will convert back before flushing or
	 * logging it.  We zero out the new projid field and the old link
	 * count field.  We'll handle clearing the pad field (the remains
	 * of the old uuid field) when we actually convert the inode to
	 * the new format. We don't change the version number so that we
	 * can distinguish this from a real new format inode.
	 */
	if (ip->i_d.di_version == 1) {
		ip->i_d.di_nlink = ip->i_d.di_onlink;
		ip->i_d.di_onlink = 0;
		xfs_set_projid(ip, 0);

	ip->i_delayed_blks = 0;
	ip->i_size = ip->i_d.di_size;

	/*
	 * Mark the buffer containing the inode as something to keep
	 * around for a while.  This helps to keep recently accessed
	 * meta-data in-core longer.
	 */
	xfs_buf_set_ref(bp, XFS_INO_REF);

	/*
	 * Use xfs_trans_brelse() to release the buffer containing the
	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
	 * in xfs_itobp() above.  If tp is NULL, this is just a normal
	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
	 * will only release the buffer if it is not dirty within the
	 * transaction.  It will be OK to release the buffer in this case,
	 * because inodes on disk are never destroyed and we will be
	 * locking the new in-core inode before putting it in the hash
	 * table where other processes can find it.  Thus we don't have
	 * to worry about the inode being changed just because we released
	 * the buffer.
	 */
	xfs_trans_brelse(tp, bp);
/*
 * Read in extents from a btree-format inode.
 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
 */
	xfs_extnum_t	nextents;

	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
		return XFS_ERROR(EFSCORRUPTED);
	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
	ifp = XFS_IFORK_PTR(ip, whichfork);

	/*
	 * We know that the size is valid (it's checked in iformat_btree)
	 */
	ifp->if_bytes = ifp->if_real_bytes = 0;
	ifp->if_flags |= XFS_IFEXTENTS;
	xfs_iext_add(ifp, 0, nextents);
	error = xfs_bmap_read_extents(tp, ip, whichfork);
		xfs_iext_destroy(ifp);
		ifp->if_flags &= ~XFS_IFEXTENTS;
	xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
 * appropriately within the inode.  The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget()
 * to obtain the in-core version of the allocated inode.  Finally,
 * fill in the inode and log its initial contents.  In this case,
 * ialloc_context would be set to NULL and call_again set to false.
 *
 * If xfs_dialloc() does not have an available inode,
 * it will replenish its supply by doing an allocation. Since we can
 * only do one allocation within a transaction without deadlocks, we
 * must commit the current transaction before returning the inode itself.
 * In this case, therefore, we will set call_again to true and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context.  The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
	xfs_buf_t	**ialloc_context,
	boolean_t	*call_again,

	/*
	 * Call the space management code to pick
	 * the on-disk inode to be allocated.
	 */
	error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc,
			    ialloc_context, call_again, &ino);
	if (*call_again || ino == NULLFSINO) {
	ASSERT(*ialloc_context == NULL);

	/*
	 * Get the in-core inode with the lock held exclusively.
	 * This is because we're setting fields here we need
	 * to prevent others from looking at until we're done.
	 */
	error = xfs_iget(tp->t_mountp, tp, ino, XFS_IGET_CREATE,
			 XFS_ILOCK_EXCL, &ip);

	ip->i_d.di_mode = (__uint16_t)mode;
	ip->i_d.di_onlink = 0;
	ip->i_d.di_nlink = nlink;
	ASSERT(ip->i_d.di_nlink == nlink);
	ip->i_d.di_uid = current_fsuid();
	ip->i_d.di_gid = current_fsgid();
	xfs_set_projid(ip, prid);
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));

	/*
	 * If the superblock version is up to where we support new format
	 * inodes and this is currently an old format inode, then change
	 * the inode version number now.  This way we only do the conversion
	 * here rather than here and in the flush/logging code.
	 */
	if (xfs_sb_version_hasnlink(&tp->t_mountp->m_sb) &&
	    ip->i_d.di_version == 1) {
		ip->i_d.di_version = 2;
		/*
		 * We've already zeroed the old link count, the projid field,
		 * and the pad field.
		 */

	/*
	 * Project ids won't be stored on disk if we are using a version 1 inode.
	 */
	if ((prid != 0) && (ip->i_d.di_version == 1))
		xfs_bump_ino_vers2(tp, ip);

	if (pip && XFS_INHERIT_GID(pip)) {
		ip->i_d.di_gid = pip->i_d.di_gid;
		if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
			ip->i_d.di_mode |= S_ISGID;
	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if ((irix_sgid_inherit) &&
	    (ip->i_d.di_mode & S_ISGID) &&
	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
		ip->i_d.di_mode &= ~S_ISGID;

	ip->i_d.di_size = 0;
	ip->i_d.di_nextents = 0;
	ASSERT(ip->i_d.di_nblocks == 0);

	ip->i_d.di_mtime.t_sec = (__int32_t)tv.tv_sec;
	ip->i_d.di_mtime.t_nsec = (__int32_t)tv.tv_nsec;
	ip->i_d.di_atime = ip->i_d.di_mtime;
	ip->i_d.di_ctime = ip->i_d.di_mtime;

	/*
	 * di_gen will have been taken care of in xfs_iread.
	 */
	ip->i_d.di_extsize = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_dmstate = 0;
	ip->i_d.di_flags = 0;
	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
		ip->i_df.if_u2.if_rdev = rdev;
		ip->i_df.if_flags = 0;
		flags |= XFS_ILOG_DEV;
		/*
		 * we can't set up filestreams until after the VFS inode
		 * is set up properly.
		 */
		if (pip && xfs_inode_is_filestream(pip))
		if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
			if ((mode & S_IFMT) == S_IFDIR) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_RTINHERIT;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
			} else if ((mode & S_IFMT) == S_IFREG) {
				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
					di_flags |= XFS_DIFLAG_REALTIME;
				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
					di_flags |= XFS_DIFLAG_EXTSIZE;
					ip->i_d.di_extsize = pip->i_d.di_extsize;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
			    xfs_inherit_noatime)
				di_flags |= XFS_DIFLAG_NOATIME;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
				di_flags |= XFS_DIFLAG_NODUMP;
			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
				di_flags |= XFS_DIFLAG_SYNC;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
			    xfs_inherit_nosymlinks)
				di_flags |= XFS_DIFLAG_NOSYMLINKS;
			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
				di_flags |= XFS_DIFLAG_PROJINHERIT;
			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
			    xfs_inherit_nodefrag)
				di_flags |= XFS_DIFLAG_NODEFRAG;
			if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
				di_flags |= XFS_DIFLAG_FILESTREAM;
			ip->i_d.di_flags |= di_flags;

		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_flags = XFS_IFEXTENTS;
		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
		ip->i_df.if_u1.if_extents = NULL;

	/*
	 * Attribute fork settings for new inode.
	 */
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_anextents = 0;

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin_ref(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup inode ops and unlock */
	xfs_setup_inode(ip);

	/* now we have set up the vfs inode we can associate the filestream */
		error = xfs_filestream_associate(pip, ip);
			xfs_iflags_set(ip, XFS_IFILESTREAM);
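
/*
 * Illustrative caller sketch (not part of the original source): the
 * commit-and-retry protocol described in the comment above xfs_ialloc().
 * This is a simplified outline only; the real caller is xfs_dir_ialloc(),
 * and transaction reservation plus error handling are omitted.  The
 * variable names (dp, mode, nlink, rdev, prid, okalloc) are hypothetical.
 */
#if 0
	xfs_buf_t	*ialloc_context = NULL;
	boolean_t	call_again = B_FALSE;
	xfs_inode_t	*ip = NULL;

	error = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
			   &ialloc_context, &call_again, &ip);
	if (!error && call_again) {
		/*
		 * xfs_dialloc() had to allocate a new inode chunk.  Hold the
		 * returned buffer across a commit of the current transaction,
		 * start a new one, and call xfs_ialloc() again to pick up the
		 * inode that is now guaranteed to be available.
		 */
		xfs_trans_bhold(tp, ialloc_context);
		/* ...commit tp, reserve a new transaction... */
		error = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc,
				   &ialloc_context, &call_again, &ip);
	}
#endif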
/*
 * Check to make sure that there are no blocks allocated to the
 * file beyond the size of the file.  We don't check this for
 * files with fixed size extents or real time extents, but we
 * at least do it for regular files.
 */
	struct xfs_inode	*ip,
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		map_first;
	xfs_bmbt_irec_t		imaps[2];

	if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
	if (XFS_IS_REALTIME_INODE(ip))
	if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)

	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	/*
	 * The filesystem could be shutting down, so bmapi may return
	 */
	if (xfs_bmapi(NULL, ip, map_first,
			(xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
			XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
	ASSERT(nimaps == 1);
	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);

#define xfs_isize_check(ip, isize)
/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction.  This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
xfs_itruncate_extents(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	xfs_fsize_t		new_size)
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	struct xfs_trans	*ntp;
	xfs_bmap_free_t		free_list;
	xfs_fsblock_t		first_block;
	xfs_fileoff_t		first_unmap_block;
	xfs_fileoff_t		last_block;
	xfs_filblks_t		unmap_len;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT(new_size <= ip->i_size);
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.  If the first block to be removed is
	 * beyond the maximum file size (ie it is the same as last_block),
	 * then there is nothing to do.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
	if (first_unmap_block == last_block)

	ASSERT(first_unmap_block < last_block);
	unmap_len = last_block - first_unmap_block + 1;
		xfs_bmap_init(&free_list, &first_block);
		error = xfs_bunmapi(tp, ip,
				    first_unmap_block, unmap_len,
				    xfs_bmapi_aflag(whichfork),
				    XFS_ITRUNC_MAX_EXTENTS,
				    &first_block, &free_list,
			goto out_bmap_cancel;

		/*
		 * Duplicate the transaction that has the permanent
		 * reservation and commit the old transaction.
		 */
		error = xfs_bmap_finish(&tp, &free_list, &committed);
			xfs_trans_ijoin(tp, ip);
			goto out_bmap_cancel;

		/*
		 * Mark the inode dirty so it will be logged and
		 * moved forward in the log as part of every commit.
		 */
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

		ntp = xfs_trans_dup(tp);
		error = xfs_trans_commit(tp, 0);
		xfs_trans_ijoin(tp, ip);

		/*
		 * Transaction commit worked ok so we can drop the extra ticket
		 * reference that we gained in xfs_trans_dup()
		 */
		xfs_log_ticket_put(tp->t_ticket);
		error = xfs_trans_reserve(tp, 0,
					XFS_ITRUNCATE_LOG_RES(mp), 0,
					XFS_TRANS_PERM_LOG_RES,
					XFS_ITRUNCATE_LOG_COUNT);

	/*
	 * If the bunmapi call encounters an error, return to the caller where
	 * the transaction can be properly aborted.  We just need to make sure
	 * we're not holding any resources that we were not when we came in.
	 */
	xfs_bmap_cancel(&free_list);
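
/*
 * Illustrative usage sketch (not part of the original source): the routine
 * above works on either fork.  Removing all attribute fork blocks, as an
 * attr-inactivation path might, looks roughly like this (locking,
 * reservation and error handling elided; names are hypothetical).
 */
#if 0
	xfs_trans_ijoin(tp, ip);	/* tp holds a permanent log reservation */
	error = xfs_itruncate_extents(&tp, ip, XFS_ATTR_FORK, 0);
	/* on return the inode is still locked and joined to the returned tp */
#endif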
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	xfs_fsize_t		new_size)

	trace_xfs_itruncate_data_start(ip, new_size);

	/*
	 * The first thing we do is set the size to new_size permanently on
	 * disk.  This way we don't have to worry about anyone ever being able
	 * to look at the data being freed even in the face of a crash.
	 * What we're getting around here is the case where we free a block, it
	 * is allocated to another file, it is written to, and then we crash.
	 * If the new data gets written to the file but the log buffers
	 * containing the free and reallocation don't, then we'd end up with
	 * garbage in the blocks being freed.  As long as we make the new_size
	 * permanent before actually freeing any blocks it doesn't matter if
	 * they get written to.
	 */
	if (ip->i_d.di_nextents > 0) {
		/*
		 * If we are not changing the file size then do not update
		 * the on-disk file size - we may be called from
		 * xfs_inactive_free_eofblocks().  If we update the on-disk
		 * file size and then the system crashes before the contents
		 * of the file are flushed to disk then the files may be
		 * full of holes (ie NULL files bug).
		 */
		if (ip->i_size != new_size) {
			ip->i_d.di_size = new_size;
			ip->i_size = new_size;
			xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(tpp, ip, XFS_DATA_FORK, new_size);

	/*
	 * If we are not changing the file size then do not update the on-disk
	 * file size - we may be called from xfs_inactive_free_eofblocks().
	 * If we update the on-disk file size and then the system crashes
	 * before the contents of the file are flushed to disk then the files
	 * may be full of holes (ie NULL files bug).
	 */
	xfs_isize_check(ip, new_size);
	if (ip->i_size != new_size) {
		ip->i_d.di_size = new_size;
		ip->i_size = new_size;

	ASSERT(new_size != 0 || ip->i_delayed_blks == 0);
	ASSERT(new_size != 0 || ip->i_d.di_nextents == 0);

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_data_end(ip, new_size);
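
/*
 * Illustrative usage sketch (not part of the original source): truncating a
 * file to zero length during inode inactivation with the routine above.
 * Transaction flag choices are assumptions and error handling is elided.
 */
#if 0
	tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
				  XFS_TRANS_PERM_LOG_RES,
				  XFS_ITRUNCATE_LOG_COUNT);
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_trans_ijoin(tp, ip);
	error = xfs_itruncate_data(&tp, ip, 0);	/* frees all data fork blocks */
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
#endif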
/*
 * This is called when the inode's link count goes to 0.
 * We place the on-disk inode on a list in the AGI.  It
 * will be pulled from this list when the inode is freed.
 */
	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_mode != 0);

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 */
	error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index]);
	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);

	if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
		/*
		 * There is already another inode in the bucket we need
		 * to add ourselves to.  Add us at the front of the list.
		 * Here we put the head pointer into our next pointer,
		 * and then we fall through to point the head at us.
		 */
		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
		ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
		offset = ip->i_imap.im_boffset +
			offsetof(xfs_dinode_t, di_next_unlinked);
		xfs_trans_inode_buf(tp, ibp);
		xfs_trans_log_buf(tp, ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, ibp);

	/*
	 * Point the bucket head pointer at the inode being inserted.
	 */
	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
	offset = offsetof(xfs_agi_t, agi_unlinked) +
		(sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset,
			  (offset + sizeof(xfs_agino_t) - 1));
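
/*
 * Illustrative sketch (not part of the original source): the shape of the
 * AGI unlinked list that xfs_iunlink() and xfs_iunlink_remove() maintain.
 * Each AGI carries XFS_AGI_UNLINKED_BUCKETS singly linked list heads; an
 * inode is hashed to a bucket by its AG inode number and chained through
 * the on-disk di_next_unlinked field.  A read-only walk of one bucket would
 * look roughly like this (error handling elided; names are hypothetical).
 */
#if 0
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
	while (next_agino != NULLAGINO) {
		next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
		error = xfs_inotobp(mp, tp, next_ino, &dip, &ibp, &offset, 0);
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		xfs_trans_brelse(tp, ibp);
	}
#endif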
/*
 * Pull the on-disk inode from the AGI unlinked list.
 */
	xfs_agnumber_t	agno;
	xfs_agino_t	next_agino;
	xfs_buf_t	*last_ibp;
	xfs_dinode_t	*last_dip = NULL;
	int		offset, last_offset = 0;

	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);

	/*
	 * Get the agi buffer first.  It ensures lock ordering
	 */
	error = xfs_read_agi(mp, tp, agno, &agibp);
	agi = XFS_BUF_TO_AGI(agibp);

	/*
	 * Get the index into the agi hash table for the
	 * list this inode will go on.
	 */
	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
	ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
	ASSERT(agi->agi_unlinked[bucket_index]);

	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
		/*
		 * We're at the head of the list.  Get the inode's
		 * on-disk buffer to see if there is anyone after us
		 * on the list.  Only modify our next pointer if it
		 * is not already NULLAGINO.  This saves us the overhead
		 * of dealing with the buffer when there is no need to
		 * change it.
		 */
		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
			xfs_warn(mp, "%s: xfs_itobp() returned error %d.",
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);
			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
			xfs_trans_brelse(tp, ibp);
		/*
		 * Point the bucket head pointer at the next inode.
		 */
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
		offset = offsetof(xfs_agi_t, agi_unlinked) +
			(sizeof(xfs_agino_t) * bucket_index);
		xfs_trans_log_buf(tp, agibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		/*
		 * We need to search the list for the inode being freed.
		 */
		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
		while (next_agino != agino) {
			/*
			 * If the last inode wasn't the one pointing to
			 * us, then release its buffer since we're not
			 * going to do anything with it.
			 */
			if (last_ibp != NULL) {
				xfs_trans_brelse(tp, last_ibp);
			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
			error = xfs_inotobp(mp, tp, next_ino, &last_dip,
					    &last_ibp, &last_offset, 0);
				"%s: xfs_inotobp() returned error %d.",
			next_agino = be32_to_cpu(last_dip->di_next_unlinked);
			ASSERT(next_agino != NULLAGINO);
			ASSERT(next_agino != 0);
		/*
		 * Now last_ibp points to the buffer previous to us on
		 * the unlinked list.  Pull us from the list.
		 */
		error = xfs_itobp(mp, tp, ip, &dip, &ibp, XBF_LOCK);
			xfs_warn(mp, "%s: xfs_itobp(2) returned error %d.",
		next_agino = be32_to_cpu(dip->di_next_unlinked);
		ASSERT(next_agino != 0);
		ASSERT(next_agino != agino);
		if (next_agino != NULLAGINO) {
			dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
			offset = ip->i_imap.im_boffset +
				offsetof(xfs_dinode_t, di_next_unlinked);
			xfs_trans_inode_buf(tp, ibp);
			xfs_trans_log_buf(tp, ibp, offset,
					  (offset + sizeof(xfs_agino_t) - 1));
			xfs_inobp_check(mp, ibp);
			xfs_trans_brelse(tp, ibp);
		/*
		 * Point the previous inode on the list to the next inode.
		 */
		last_dip->di_next_unlinked = cpu_to_be32(next_agino);
		ASSERT(next_agino != 0);
		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
		xfs_trans_inode_buf(tp, last_ibp);
		xfs_trans_log_buf(tp, last_ibp, offset,
				  (offset + sizeof(xfs_agino_t) - 1));
		xfs_inobp_check(mp, last_ibp);
/*
 * A big issue when freeing the inode cluster is that we _cannot_ skip any
 * inodes that are in memory - they all must be marked stale and attached to
 * the cluster buffer.
 */
	xfs_inode_t		*free_ip,
	xfs_mount_t		*mp = free_ip->i_mount;
	int			blks_per_cluster;
	xfs_inode_log_item_t	*iip;
	xfs_log_item_t		*lip;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
		blks_per_cluster = 1;
		ninodes = mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp);
		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
					mp->m_sb.sb_blocksize;
		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;

	for (j = 0; j < nbufs; j++, inum += ninodes) {
		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
					 XFS_INO_TO_AGBNO(mp, inum));

		/*
		 * We obtain and lock the backing buffer first in the process
		 * here, as we have to ensure that any dirty inode that we
		 * can't get the flush lock on is attached to the buffer.
		 * If we scan the in-memory inodes first, then buffer IO can
		 * complete before we get a lock on it, and hence we may fail
		 * to mark all the active inodes on the buffer stale.
		 */
		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
					mp->m_bsize * blks_per_cluster,

		/*
		 * Walk the inodes already attached to the buffer and mark them
		 * stale.  These will all have the flush locks held, so an
		 * in-memory inode walk can't lock them.  By marking them all
		 * stale first, we will not attempt to lock them in the loop
		 * below as the XFS_ISTALE flag will be set.
		 */
			if (lip->li_type == XFS_LI_INODE) {
				iip = (xfs_inode_log_item_t *)lip;
				ASSERT(iip->ili_logged == 1);
				lip->li_cb = xfs_istale_done;
				xfs_trans_ail_copy_lsn(mp->m_ail,
							&iip->ili_flush_lsn,
							&iip->ili_item.li_lsn);
				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
			lip = lip->li_bio_list;

		/*
		 * For each inode in memory attempt to add it to the inode
		 * buffer and set it up for being staled on buffer IO
		 * completion.  This is safe as we've locked out tail pushing
		 * and flushing by locking the buffer.
		 *
		 * We have already marked every inode that was part of a
		 * transaction stale above, which means there is no point in
		 * even trying to lock them.
		 */
		for (i = 0; i < ninodes; i++) {
			ip = radix_tree_lookup(&pag->pag_ici_root,
					XFS_INO_TO_AGINO(mp, (inum + i)));

			/* Inode not in memory, nothing to do */

			/*
			 * because this is an RCU protected lookup, we could
			 * find a recently freed or even reallocated inode
			 * during the lookup.  We need to check under the
			 * i_flags_lock for a valid inode here.  Skip it if it
			 * is not valid, the wrong inode or stale.
			 */
			spin_lock(&ip->i_flags_lock);
			if (ip->i_ino != inum + i ||
			    __xfs_iflags_test(ip, XFS_ISTALE)) {
				spin_unlock(&ip->i_flags_lock);
			spin_unlock(&ip->i_flags_lock);

			/*
			 * Don't try to lock/unlock the current inode, but we
			 * _cannot_ skip the other inodes that we did not find
			 * in the list attached to the buffer and are not
			 * already marked stale.  If we can't lock it, back off
			 */
			if (ip != free_ip &&
			    !xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {

			xfs_iflags_set(ip, XFS_ISTALE);

			/*
			 * we don't need to attach clean inodes or those only
			 * with unlogged changes (which we throw away, anyway).
			 */
			if (!iip || xfs_inode_clean(ip)) {
				ASSERT(ip != free_ip);
				ip->i_update_core = 0;
				xfs_iunlock(ip, XFS_ILOCK_EXCL);

			iip->ili_last_fields = iip->ili_format.ilf_fields;
			iip->ili_format.ilf_fields = 0;
			iip->ili_logged = 1;
			xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
						&iip->ili_item.li_lsn);

			xfs_buf_attach_iodone(bp, xfs_istale_done,
			xfs_iunlock(ip, XFS_ILOCK_EXCL);

		xfs_trans_stale_inode_buf(tp, bp);
		xfs_trans_binval(tp, bp);
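
/*
 * Worked example (not part of the original source) of the cluster geometry
 * computed above.  The numbers are assumptions for illustration only:
 * sb_blocksize = 4096, XFS_INODE_CLUSTER_SIZE(mp) = 8192, sb_inopblock = 16
 * and XFS_IALLOC_BLOCKS(mp) = 4.
 */
#if 0
	blks_per_cluster = 8192 / 4096;		/* = 2 blocks per cluster buffer */
	ninodes = blks_per_cluster * 16;	/* = 32 inodes per cluster buffer */
	nbufs = 4 / blks_per_cluster;		/* = 2 cluster buffers per inode chunk */
#endif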
/*
 * This is called to return an inode to the inode free list.
 * The inode should already be truncated to 0 length and have
 * no pages associated with it.  This routine also assumes that
 * the inode is already a part of the transaction.
 *
 * The on-disk copy of the inode will have been added to the list
 * of unlinked inodes in the AGI. We need to remove the inode from
 * that list atomically with respect to freeing it here.
 */
	xfs_bmap_free_t	*flist)
	xfs_ino_t	first_ino;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(ip->i_d.di_nlink == 0);
	ASSERT(ip->i_d.di_nextents == 0);
	ASSERT(ip->i_d.di_anextents == 0);
	ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
	       ((ip->i_d.di_mode & S_IFMT) != S_IFREG));
	ASSERT(ip->i_d.di_nblocks == 0);

	/*
	 * Pull the on-disk inode from the AGI unlinked list.
	 */
	error = xfs_iunlink_remove(tp, ip);

	error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);

	ip->i_d.di_mode = 0;		/* mark incore inode as free */
	ip->i_d.di_flags = 0;
	ip->i_d.di_dmevmask = 0;
	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
	ip->i_df.if_ext_max =
		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;

	/*
	 * Bump the generation count so no one will be confused
	 * by reincarnations of this inode.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itobp(ip->i_mount, tp, ip, &dip, &ibp, XBF_LOCK);

	/*
	 * Clear the on-disk di_mode. This is to prevent xfs_bulkstat
	 * from picking up this inode when it is reclaimed (its incore state
	 * initialized but not flushed to disk yet). The in-core di_mode is
	 * already cleared and a corresponding transaction logged.
	 * The hack here just synchronizes the in-core to on-disk
	 * di_mode value in advance before the actual inode sync to disk.
	 * This is OK because the inode is already unlinked and would never
	 * change its di_mode again for this inode generation.
	 * This is a temporary hack that would require a proper fix
	 * in the future.
	 */
	xfs_ifree_cluster(ip, tp, first_ino);
/*
 * Reallocate the space for if_broot based on the number of records
 * being added or deleted as indicated in rec_diff.  Move the records
 * and pointers in if_broot to fit the new size.  When shrinking this
 * will eliminate holes between the records and pointers created by
 * the caller.  When growing this will create holes to be filled in
 * by the caller.
 *
 * The caller must not request to add more records than would fit in
 * the on-disk inode root.  If the if_broot is currently NULL, then
 * if we are adding records one will be allocated.  The caller must also
 * not request that the number of records go below zero, although
 * it can go to zero.
 *
 * ip -- the inode whose if_broot area is changing
 * ext_diff -- the change in the number of records, positive or negative,
 *	 requested for the if_broot array.
 */
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_btree_block	*new_broot;

	/*
	 * Handle the degenerate case quietly.
	 */
	if (rec_diff == 0) {

	ifp = XFS_IFORK_PTR(ip, whichfork);
		/*
		 * If there wasn't any memory allocated before, just
		 * allocate it now and get out.
		 */
		if (ifp->if_broot_bytes == 0) {
			new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
			ifp->if_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
			ifp->if_broot_bytes = (int)new_size;

		/*
		 * If there is already an existing if_broot, then we need
		 * to realloc() it and shift the pointers to their new
		 * location.  The records don't change location because
		 * they are kept butted up against the btree block header.
		 */
		cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
		new_max = cur_max + rec_diff;
		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
		ifp->if_broot = kmem_realloc(ifp->if_broot, new_size,
				(size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
				KM_SLEEP | KM_NOFS);
		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     ifp->if_broot_bytes);
		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
		ifp->if_broot_bytes = (int)new_size;
		ASSERT(ifp->if_broot_bytes <=
			XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
		memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));

	/*
	 * rec_diff is less than 0.  In this case, we are shrinking the
	 * if_broot buffer.  It must already exist.  If we go to zero
	 * records, just get rid of the root and clear the status bit.
	 */
	ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
	cur_max = xfs_bmbt_maxrecs(mp, ifp->if_broot_bytes, 0);
	new_max = cur_max + rec_diff;
	ASSERT(new_max >= 0);
		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
	new_broot = kmem_alloc(new_size, KM_SLEEP | KM_NOFS);
	/*
	 * First copy over the btree block header.
	 */
	memcpy(new_broot, ifp->if_broot, XFS_BTREE_LBLOCK_LEN);
		ifp->if_flags &= ~XFS_IFBROOT;

	/*
	 * Only copy the records and pointers if there are any.
	 */
		/*
		 * First copy the records.
		 */
		op = (char *)XFS_BMBT_REC_ADDR(mp, ifp->if_broot, 1);
		np = (char *)XFS_BMBT_REC_ADDR(mp, new_broot, 1);
		memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));

		/*
		 * Then copy the pointers.
		 */
		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, ifp->if_broot, 1,
						     ifp->if_broot_bytes);
		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(mp, new_broot, 1,
		memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
	kmem_free(ifp->if_broot);
	ifp->if_broot = new_broot;
	ifp->if_broot_bytes = (int)new_size;
	ASSERT(ifp->if_broot_bytes <=
		XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
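
/*
 * Illustrative usage sketch (not part of the original source): the bmap
 * btree code grows or shrinks the incore root through this helper when a
 * record is inserted into or removed from the root block.
 */
#if 0
	xfs_iroot_realloc(ip, 1, XFS_DATA_FORK);	/* make room for one more record */
	xfs_iroot_realloc(ip, -1, XFS_DATA_FORK);	/* give one record's worth back */
#endif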
/*
 * This is called when the amount of space needed for if_data
 * is increased or decreased.  The change in size is indicated by
 * the number of bytes that need to be added or deleted in the
 * byte_diff parameter.
 *
 * If the amount of space needed has decreased below the size of the
 * inline buffer, then switch to using the inline buffer.  Otherwise,
 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
 * to what is needed.
 *
 * ip -- the inode whose if_data area is changing
 * byte_diff -- the change in the number of bytes, positive or negative,
 *	 requested for the if_data array.
 */
	if (byte_diff == 0) {

	ifp = XFS_IFORK_PTR(ip, whichfork);
	new_size = (int)ifp->if_bytes + byte_diff;
	ASSERT(new_size >= 0);

	if (new_size == 0) {
		if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
			kmem_free(ifp->if_u1.if_data);
		ifp->if_u1.if_data = NULL;
	} else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
		/*
		 * If the valid extents/data can fit in if_inline_ext/data,
		 * copy them from the malloc'd vector and free it.
		 */
		if (ifp->if_u1.if_data == NULL) {
			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
			ASSERT(ifp->if_real_bytes != 0);
			memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
			kmem_free(ifp->if_u1.if_data);
			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;

		/*
		 * Stuck with malloc/realloc.
		 * For inline data, the underlying buffer must be
		 * a multiple of 4 bytes in size so that it can be
		 * logged and stay on word boundaries.  We enforce
		 * that here.
		 */
		real_size = roundup(new_size, 4);
		if (ifp->if_u1.if_data == NULL) {
			ASSERT(ifp->if_real_bytes == 0);
			ifp->if_u1.if_data = kmem_alloc(real_size,
							KM_SLEEP | KM_NOFS);
		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
			/*
			 * Only do the realloc if the underlying size
			 * is really changing.
			 */
			if (ifp->if_real_bytes != real_size) {
				ifp->if_u1.if_data =
					kmem_realloc(ifp->if_u1.if_data,
							KM_SLEEP | KM_NOFS);
			ASSERT(ifp->if_real_bytes == 0);
			ifp->if_u1.if_data = kmem_alloc(real_size,
							KM_SLEEP | KM_NOFS);
			memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,

	ifp->if_real_bytes = real_size;
	ifp->if_bytes = new_size;
	ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
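
/*
 * Illustrative usage sketch (not part of the original source): growing a
 * local-format (inline) data fork by 'grow' bytes before copying in new
 * contents.  The helper keeps the underlying allocation rounded up to a
 * multiple of 4 bytes, as described above; 'grow' is hypothetical.
 */
#if 0
	xfs_idata_realloc(ip, grow, XFS_DATA_FORK);
	/* ifp->if_bytes has grown; ifp->if_real_bytes is rounded up to 4 bytes */
	xfs_idata_realloc(ip, -grow, XFS_DATA_FORK);	/* and shrink it again */
#endif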
	ifp = XFS_IFORK_PTR(ip, whichfork);
	if (ifp->if_broot != NULL) {
		kmem_free(ifp->if_broot);
		ifp->if_broot = NULL;

	/*
	 * If the format is local, then we can't have an extents
	 * array so just look for an inline data array.  If we're
	 * not local then we may or may not have an extents list,
	 * so check and free it up if we do.
	 */
	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
		if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
		    (ifp->if_u1.if_data != NULL)) {
			ASSERT(ifp->if_real_bytes != 0);
			kmem_free(ifp->if_u1.if_data);
			ifp->if_u1.if_data = NULL;
			ifp->if_real_bytes = 0;
	} else if ((ifp->if_flags & XFS_IFEXTENTS) &&
		   ((ifp->if_flags & XFS_IFEXTIREC) ||
		    ((ifp->if_u1.if_extents != NULL) &&
		     (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
		ASSERT(ifp->if_real_bytes != 0);
		xfs_iext_destroy(ifp);
	ASSERT(ifp->if_u1.if_extents == NULL ||
	       ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
	ASSERT(ifp->if_real_bytes == 0);
	if (whichfork == XFS_ATTR_FORK) {
		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
/*
 * This is called to unpin an inode.  The caller must have the inode locked
 * in at least shared mode so that the buffer cannot be subsequently pinned
 * once someone is waiting for it to be unpinned.
 */
static void
xfs_iunpin_nowait(
	struct xfs_inode	*ip)
{
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));

	trace_xfs_inode_unpin_nowait(ip, _RET_IP_);

	/* Give the log a push to start the unpinning I/O */
	xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
}

void
xfs_iunpin_wait(
	struct xfs_inode	*ip)
{
	if (xfs_ipincount(ip)) {
		xfs_iunpin_nowait(ip);
		wait_event(ip->i_ipin_wait, (xfs_ipincount(ip) == 0));
	}
}
/*
 * xfs_iextents_copy()
 *
 * This is called to copy the REAL extents (as opposed to the delayed
 * allocation extents) from the inode into the given buffer.  It
 * returns the number of bytes copied into the buffer.
 *
 * If there are no delayed allocation extents, then we can just
 * memcpy() the extents into the buffer.  Otherwise, we need to
 * examine each extent in turn and skip those which are delayed.
 */
int
xfs_iextents_copy(
	xfs_inode_t		*ip,
	xfs_bmbt_rec_t		*dp,
	int			whichfork)
{
	int			copied;
	int			i;
	xfs_ifork_t		*ifp;
	int			nrecs;
	xfs_fsblock_t		start_block;

	ifp = XFS_IFORK_PTR(ip, whichfork);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(ifp->if_bytes > 0);

	nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	XFS_BMAP_TRACE_EXLIST(ip, nrecs, whichfork);
	ASSERT(nrecs > 0);

	/*
	 * There are some delayed allocation extents in the
	 * inode, so copy the extents one at a time and skip
	 * the delayed ones.  There must be at least one
	 * non-delayed extent.
	 */
	copied = 0;
	for (i = 0; i < nrecs; i++) {
		xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
		start_block = xfs_bmbt_get_startblock(ep);
		if (isnullstartblock(start_block)) {
			/*
			 * It's a delayed allocation extent, so skip it.
			 */
			continue;
		}

		/* Translate to on disk format */
		put_unaligned(cpu_to_be64(ep->l0), &dp->l0);
		put_unaligned(cpu_to_be64(ep->l1), &dp->l1);
		dp++;
		copied++;
	}
	ASSERT(copied != 0);
	xfs_validate_extents(ifp, copied, XFS_EXTFMT_INODE(ip));

	return (copied * (uint)sizeof(xfs_bmbt_rec_t));
}
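
/*
 * Note (illustrative, not in the original source): the return value above
 * is a byte count, so a caller filling an extents-format on-disk fork would
 * typically do something like
 *
 *	nbytes = xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, XFS_DATA_FORK);
 *	ASSERT(nbytes <= XFS_DFORK_SIZE(dip, mp, XFS_DATA_FORK));
 *
 * which is essentially what xfs_iflush_fork() below does; it can discard the
 * return value because its own ASSERTs already bound if_bytes.
 */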
/*
 * Each of the following cases stores data into the same region
 * of the on-disk inode, so only one of them can be valid at
 * any given time. While it is possible to have conflicting formats
 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
 * in EXTENTS format, this can only happen when the fork has
 * changed formats after being modified but before being flushed.
 * In these cases, the format always takes precedence, because the
 * format indicates the current state of the fork.
 */
STATIC void
xfs_iflush_fork(
	xfs_inode_t		*ip,
	xfs_dinode_t		*dip,
	xfs_inode_log_item_t	*iip,
	int			whichfork,
	xfs_buf_t		*bp)
{
	char			*cp;
	xfs_ifork_t		*ifp;
	xfs_mount_t		*mp;
#ifdef XFS_TRANS_DEBUG
	int			first;
#endif
	static const short	brootflag[2] =
		{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
	static const short	dataflag[2] =
		{ XFS_ILOG_DDATA, XFS_ILOG_ADATA };
	static const short	extflag[2] =
		{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };

	if (!iip)
		return;
	ifp = XFS_IFORK_PTR(ip, whichfork);
	/*
	 * This can happen if we gave up in iformat in an error path,
	 * for the attribute fork.
	 */
	if (!ifp) {
		ASSERT(whichfork == XFS_ATTR_FORK);
		return;
	}
	cp = XFS_DFORK_PTR(dip, whichfork);
	mp = ip->i_mount;
	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
	case XFS_DINODE_FMT_LOCAL:
		if ((iip->ili_format.ilf_fields & dataflag[whichfork]) &&
		    (ifp->if_bytes > 0)) {
			ASSERT(ifp->if_u1.if_data != NULL);
			ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
			memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
		}
		break;

	case XFS_DINODE_FMT_EXTENTS:
		ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
		       !(iip->ili_format.ilf_fields & extflag[whichfork]));
		if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
		    (ifp->if_bytes > 0)) {
			ASSERT(xfs_iext_get_ext(ifp, 0));
			ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
			(void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
				whichfork);
		}
		break;

	case XFS_DINODE_FMT_BTREE:
		if ((iip->ili_format.ilf_fields & brootflag[whichfork]) &&
		    (ifp->if_broot_bytes > 0)) {
			ASSERT(ifp->if_broot != NULL);
			ASSERT(ifp->if_broot_bytes <=
			       (XFS_IFORK_SIZE(ip, whichfork) +
				XFS_BROOT_SIZE_ADJ));
			xfs_bmbt_to_bmdr(mp, ifp->if_broot, ifp->if_broot_bytes,
				(xfs_bmdr_block_t *)cp,
				XFS_DFORK_SIZE(dip, mp, whichfork));
		}
		break;

	case XFS_DINODE_FMT_DEV:
		if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
			ASSERT(whichfork == XFS_DATA_FORK);
			xfs_dinode_put_rdev(dip, ip->i_df.if_u2.if_rdev);
		}
		break;

	case XFS_DINODE_FMT_UUID:
		if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
			ASSERT(whichfork == XFS_DATA_FORK);
			memcpy(XFS_DFORK_DPTR(dip),
				&ip->i_df.if_u2.if_uuid,
				sizeof(uuid_t));
		}
		break;

	default:
		ASSERT(0);
		break;
	}
}
STATIC int
xfs_iflush_cluster(
	xfs_inode_t	*ip,
	xfs_buf_t	*bp)
{
	xfs_mount_t		*mp = ip->i_mount;
	struct xfs_perag	*pag;
	unsigned long		first_index, mask;
	unsigned long		inodes_per_cluster;
	int			ilist_size;
	xfs_inode_t		**ilist;
	xfs_inode_t		*iq;
	int			nr_found;
	int			clcount = 0;
	int			bufwasdelwri;
	int			i;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));

	inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
	ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
	ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
	if (!ilist)
		goto out_put;

	mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
	first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
	rcu_read_lock();
	/* really need a gang lookup range call here */
	nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
					first_index, inodes_per_cluster);
	if (nr_found == 0)
		goto out_free;

	for (i = 0; i < nr_found; i++) {
		iq = ilist[i];
		if (iq == ip)
			continue;

		/*
		 * because this is an RCU protected lookup, we could find a
		 * recently freed or even reallocated inode during the lookup.
		 * We need to check under the i_flags_lock for a valid inode
		 * here. Skip it if it is not valid or the wrong inode.
		 */
		spin_lock(&ip->i_flags_lock);
		if (!ip->i_ino ||
		    (XFS_INO_TO_AGINO(mp, iq->i_ino) & mask) != first_index) {
			spin_unlock(&ip->i_flags_lock);
			continue;
		}
		spin_unlock(&ip->i_flags_lock);

		/*
		 * Do an un-protected check to see if the inode is dirty and
		 * is a candidate for flushing.  These checks will be repeated
		 * later after the appropriate locks are acquired.
		 */
		if (xfs_inode_clean(iq) && xfs_ipincount(iq) == 0)
			continue;

		/*
		 * Try to get locks.  If any are unavailable or it is pinned,
		 * then this inode cannot be flushed and is skipped.
		 */
		if (!xfs_ilock_nowait(iq, XFS_ILOCK_SHARED))
			continue;
		if (!xfs_iflock_nowait(iq)) {
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}
		if (xfs_ipincount(iq)) {
			xfs_ifunlock(iq);
			xfs_iunlock(iq, XFS_ILOCK_SHARED);
			continue;
		}

		/*
		 * arriving here means that this inode can be flushed.  First
		 * re-check that it's dirty before flushing.
		 */
		if (!xfs_inode_clean(iq)) {
			int	error;
			error = xfs_iflush_int(iq, bp);
			if (error) {
				xfs_iunlock(iq, XFS_ILOCK_SHARED);
				goto cluster_corrupt_out;
			}
			clcount++;
		} else {
			xfs_ifunlock(iq);
		}
		xfs_iunlock(iq, XFS_ILOCK_SHARED);
	}

	if (clcount) {
		XFS_STATS_INC(xs_icluster_flushcnt);
		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
	}

out_free:
	rcu_read_unlock();
	kmem_free(ilist);
out_put:
	xfs_perag_put(pag);
	return 0;


cluster_corrupt_out:
	/*
	 * Corruption detected in the clustering loop.  Invalidate the
	 * inode buffer and shut down the filesystem.
	 */
	rcu_read_unlock();
	/*
	 * Clean up the buffer.  If it was B_DELWRI, just release it --
	 * brelse can handle it with no problems.  If not, shut down the
	 * filesystem before releasing the buffer.
	 */
	bufwasdelwri = XFS_BUF_ISDELAYWRITE(bp);
	if (bufwasdelwri)
		xfs_buf_relse(bp);

	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

	if (!bufwasdelwri) {
		/*
		 * Just like incore_relse: if we have b_iodone functions,
		 * mark the buffer as an error and call them.  Otherwise
		 * mark it as stale and brelse.
		 */
		if (XFS_BUF_IODONE_FUNC(bp)) {
			XFS_BUF_UNDONE(bp);
			XFS_BUF_STALE(bp);
			XFS_BUF_ERROR(bp,EIO);
			xfs_buf_ioend(bp, 0);
		} else {
			XFS_BUF_STALE(bp);
			xfs_buf_relse(bp);
		}
	}

	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(iq);
	kmem_free(ilist);
	xfs_perag_put(pag);
	return XFS_ERROR(EFSCORRUPTED);
}
/*
 * xfs_iflush() will write a modified inode's changes out to the
 * inode's on disk home. The caller must have the inode lock held
 * in at least shared mode and the inode flush completion must be
 * active as well. The inode lock will still be held upon return from
 * the call and the caller is free to unlock it.
 * The inode flush will be completed when the inode reaches the disk.
 * The flags indicate how the inode's buffer should be written out.
 */
int
xfs_iflush(
	xfs_inode_t		*ip,
	uint			flags)
{
	xfs_inode_log_item_t	*iip;
	xfs_buf_t		*bp;
	xfs_dinode_t		*dip;
	xfs_mount_t		*mp;
	int			error;

	XFS_STATS_INC(xs_iflush_count);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(!completion_done(&ip->i_flush));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > ip->i_df.if_ext_max);

	iip = ip->i_itemp;
	mp = ip->i_mount;

	/*
	 * We can't flush the inode until it is unpinned, so wait for it if we
	 * are allowed to block.  We know no one new can pin it, because we are
	 * holding the inode lock shared and you need to hold it exclusively to
	 * pin the inode.
	 *
	 * If we are not allowed to block, force the log out asynchronously so
	 * that when we come back the inode will be unpinned. If other inodes
	 * in the same cluster are dirty, they will probably write the inode
	 * out for us if they occur after the log force completes.
	 */
	if (!(flags & SYNC_WAIT) && xfs_ipincount(ip)) {
		xfs_iunpin_nowait(ip);
		xfs_ifunlock(ip);
		return EAGAIN;
	}
	xfs_iunpin_wait(ip);

	/*
	 * For stale inodes we cannot rely on the backing buffer remaining
	 * stale in cache for the remaining life of the stale inode and so
	 * xfs_itobp() below may give us a buffer that no longer contains
	 * inodes below. We have to check this after ensuring the inode is
	 * unpinned so that it is safe to reclaim the stale inode after the
	 * flush call.
	 */
	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_ifunlock(ip);
		return 0;
	}

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this inode
	 * to disk, because the log record didn't make it to disk!
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		ip->i_update_core = 0;
		if (iip)
			iip->ili_format.ilf_fields = 0;
		xfs_ifunlock(ip);
		return XFS_ERROR(EIO);
	}

	/*
	 * Get the buffer containing the on-disk inode.
	 */
	error = xfs_itobp(mp, NULL, ip, &dip, &bp,
				(flags & SYNC_TRYLOCK) ? XBF_TRYLOCK : XBF_LOCK);
	if (error || !bp) {
		xfs_ifunlock(ip);
		return error;
	}

	/*
	 * First flush out the inode that xfs_iflush was called with.
	 */
	error = xfs_iflush_int(ip, bp);
	if (error)
		goto corrupt_out;

	/*
	 * If the buffer is pinned then push on the log now so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (XFS_BUF_ISPINNED(bp))
		xfs_log_force(mp, 0);

	/*
	 * inode clustering:
	 * see if other inodes can be gathered into this write
	 */
	error = xfs_iflush_cluster(ip, bp);
	if (error)
		goto cluster_corrupt_out;

	if (flags & SYNC_WAIT)
		error = xfs_bwrite(mp, bp);
	else
		xfs_bdwrite(mp, bp);
	return error;

corrupt_out:
	xfs_buf_relse(bp);
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
cluster_corrupt_out:
	/*
	 * Unlocks the flush lock
	 */
	xfs_iflush_abort(ip);
	return XFS_ERROR(EFSCORRUPTED);
}
STATIC int
xfs_iflush_int(
	xfs_inode_t		*ip,
	xfs_buf_t		*bp)
{
	xfs_inode_log_item_t	*iip;
	xfs_dinode_t		*dip;
	xfs_mount_t		*mp;
#ifdef XFS_TRANS_DEBUG
	int			first;
#endif

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(!completion_done(&ip->i_flush));
	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
	       ip->i_d.di_nextents > ip->i_df.if_ext_max);

	iip = ip->i_itemp;
	mp = ip->i_mount;

	/* set *dip = inode's place in the buffer */
	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_imap.im_boffset);

	/*
	 * Clear i_update_core before copying out the data.
	 * This is for coordination with our timestamp updates
	 * that don't hold the inode lock. They will always
	 * update the timestamps BEFORE setting i_update_core,
	 * so if we clear i_update_core after they set it we
	 * are guaranteed to see their updates to the timestamps.
	 * I believe that this depends on strongly ordered memory
	 * semantics, but we have that.  We use the SYNCHRONIZE
	 * macro to make sure that the compiler does not reorder
	 * the i_update_core access below the data copy below.
	 */
	ip->i_update_core = 0;
	SYNCHRONIZE();

	/*
	 * Make sure to get the latest timestamps from the Linux inode.
	 */
	xfs_synchronize_times(ip);

	if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
			       mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu magic number 0x%x, ptr 0x%p",
			__func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
				mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
			__func__, ip->i_ino, ip, ip->i_d.di_magic);
		goto corrupt_out;
	}
	if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad regular inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	} else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
		if (XFS_TEST_ERROR(
		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
			xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
				"%s: Bad directory inode %Lu, ptr 0x%p",
				__func__, ip->i_ino, ip);
			goto corrupt_out;
		}
	}
	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
				XFS_RANDOM_IFLUSH_5)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: detected corrupt incore inode %Lu, "
			"total extents = %d, nblocks = %Ld, ptr 0x%p",
			__func__, ip->i_ino,
			ip->i_d.di_nextents + ip->i_d.di_anextents,
			ip->i_d.di_nblocks, ip);
		goto corrupt_out;
	}
	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
				mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
		xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
			"%s: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
			__func__, ip->i_ino, ip->i_d.di_forkoff, ip);
		goto corrupt_out;
	}
	/*
	 * bump the flush iteration count, used to detect flushes which
	 * postdate a log record during recovery.
	 */
	ip->i_d.di_flushiter++;

	/*
	 * Copy the dirty parts of the inode into the on-disk
	 * inode.  We always copy out the core of the inode,
	 * because if the inode is dirty at all the core must
	 * be.
	 */
	xfs_dinode_to_disk(dip, &ip->i_d);

	/* Wrap, we never let the log put out DI_MAX_FLUSH */
	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
		ip->i_d.di_flushiter = 0;

	/*
	 * If this is really an old format inode and the superblock version
	 * has not been updated to support only new format inodes, then
	 * convert back to the old inode format.  If the superblock version
	 * has been updated, then make the conversion permanent.
	 */
	ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
	if (ip->i_d.di_version == 1) {
		if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
			/*
			 * Convert it back.
			 */
			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
			dip->di_onlink = cpu_to_be16(ip->i_d.di_nlink);
		} else {
			/*
			 * The superblock version has already been bumped,
			 * so just make the conversion to the new inode
			 * format permanent.
			 */
			ip->i_d.di_version = 2;
			dip->di_version = 2;
			ip->i_d.di_onlink = 0;
			dip->di_onlink = 0;
			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
			memset(&(dip->di_pad[0]), 0,
			       sizeof(dip->di_pad));
			ASSERT(xfs_get_projid(ip) == 0);
		}
	}

	xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp);
	if (XFS_IFORK_Q(ip))
		xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
	xfs_inobp_check(mp, bp);

	/*
	 * We've recorded everything logged in the inode, so we'd
	 * like to clear the ilf_fields bits so we don't log and
	 * flush things unnecessarily.  However, we can't stop
	 * logging all this information until the data we've copied
	 * into the disk buffer is written to disk.  If we did we might
	 * overwrite the copy of the inode in the log with all the
	 * data after re-logging only part of it, and in the face of
	 * a crash we wouldn't have all the data we need to recover.
	 *
	 * What we do is move the bits to the ili_last_fields field.
	 * When logging the inode, these bits are moved back to the
	 * ilf_fields field.  In the xfs_iflush_done() routine we
	 * clear ili_last_fields, since we know that the information
	 * those bits represent is permanently on disk.  As long as
	 * the flush completes before the inode is logged again, then
	 * both ilf_fields and ili_last_fields will be cleared.
	 *
	 * We can play with the ilf_fields bits here, because the inode
	 * lock must be held exclusively in order to set bits there
	 * and the flush lock protects the ili_last_fields bits.
	 * Set ili_logged so the flush done
	 * routine can tell whether or not to look in the AIL.
	 * Also, store the current LSN of the inode so that we can tell
	 * whether the item has moved in the AIL from xfs_iflush_done().
	 * In order to read the lsn we need the AIL lock, because
	 * it is a 64 bit value that cannot be read atomically.
	 */
	if (iip != NULL && iip->ili_format.ilf_fields != 0) {
		iip->ili_last_fields = iip->ili_format.ilf_fields;
		iip->ili_format.ilf_fields = 0;
		iip->ili_logged = 1;

		xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
					&iip->ili_item.li_lsn);

		/*
		 * Attach the function xfs_iflush_done to the inode's
		 * buffer.  This will remove the inode from the AIL
		 * and unlock the inode's flush lock when the inode is
		 * completely written to disk.
		 */
		xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);

		ASSERT(bp->b_fspriv != NULL);
		ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
	} else {
		/*
		 * We're flushing an inode which is not in the AIL and has
		 * not been logged but has i_update_core set.  For this
		 * case we can use a B_DELWRI flush and immediately drop
		 * the inode flush lock because we can avoid the whole
		 * AIL state thing.  It's OK to drop the flush lock now,
		 * because we've already locked the buffer and to do anything
		 * you really need both.
		 */
		if (iip != NULL) {
			ASSERT(iip->ili_logged == 0);
			ASSERT(iip->ili_last_fields == 0);
			ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
		}
		xfs_ifunlock(ip);
	}

	return 0;

corrupt_out:
	return XFS_ERROR(EFSCORRUPTED);
}
/*
 * Return a pointer to the extent record at file index idx.
 */
xfs_bmbt_rec_host_t *
xfs_iext_get_ext(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx)		/* index of target extent */
{
	ASSERT(idx >= 0);
	ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t));

	if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
		return ifp->if_u1.if_ext_irec->er_extbuf;
	} else if (ifp->if_flags & XFS_IFEXTIREC) {
		xfs_ext_irec_t	*erp;		/* irec pointer */
		int		erp_idx = 0;	/* irec index */
		xfs_extnum_t	page_idx = idx;	/* ext index in target list */

		erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
		return &erp->er_extbuf[page_idx];
	} else if (ifp->if_bytes) {
		return &ifp->if_u1.if_extents[idx];
	} else {
		return NULL;
	}
}
/*
 * Insert new item(s) into the extent records for incore inode
 * fork 'ifp'.  'count' new items are inserted at index 'idx'.
 */
void
xfs_iext_insert(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extnum_t	idx,		/* starting index of new items */
	xfs_extnum_t	count,		/* number of inserted items */
	xfs_bmbt_irec_t	*new,		/* items to insert */
	int		state)		/* type of extent conversion */
{
	xfs_ifork_t	*ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
	xfs_extnum_t	i;		/* extent record index */

	trace_xfs_iext_insert(ip, idx, new, state, _RET_IP_);

	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	xfs_iext_add(ifp, idx, count);
	for (i = idx; i < idx + count; i++, new++)
		xfs_bmbt_set_all(xfs_iext_get_ext(ifp, i), new);
}
/*
 * This is called when the amount of space required for incore file
 * extents needs to be increased. The ext_diff parameter stores the
 * number of new extents being added and the idx parameter contains
 * the extent index where the new extents will be added. If the new
 * extents are being appended, then we just need to (re)allocate and
 * initialize the space. Otherwise, if the new extents are being
 * inserted into the middle of the existing entries, a bit more work
 * is required to make room for the new extents to be inserted. The
 * caller is responsible for filling in the new extent entries upon
 * return.
 */
void
xfs_iext_add(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin adding exts */
	int		ext_diff)	/* number of extents to add */
{
	int		byte_diff;	/* new bytes being added */
	int		new_size;	/* size of extents after adding */
	xfs_extnum_t	nextents;	/* number of extents in file */

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT((idx >= 0) && (idx <= nextents));
	byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
	new_size = ifp->if_bytes + byte_diff;
	/*
	 * If the new number of extents (nextents + ext_diff)
	 * fits inside the inode, then continue to use the inline
	 * extent buffer.
	 */
	if (nextents + ext_diff <= XFS_INLINE_EXTS) {
		if (idx < nextents) {
			memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
				&ifp->if_u2.if_inline_ext[idx],
				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
			memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
		}
		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
		ifp->if_real_bytes = 0;
	}
	/*
	 * Otherwise use a linear (direct) extent list.
	 * If the extents are currently inside the inode,
	 * xfs_iext_realloc_direct will switch us from
	 * inline to direct extent allocation mode.
	 */
	else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
		xfs_iext_realloc_direct(ifp, new_size);
		if (idx < nextents) {
			memmove(&ifp->if_u1.if_extents[idx + ext_diff],
				&ifp->if_u1.if_extents[idx],
				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
			memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
		}
	}
	/* Indirection array */
	else {
		xfs_ext_irec_t	*erp;
		int		erp_idx = 0;
		int		page_idx = idx;

		ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
		if (ifp->if_flags & XFS_IFEXTIREC) {
			erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
		} else {
			xfs_iext_irec_init(ifp);
			ASSERT(ifp->if_flags & XFS_IFEXTIREC);
			erp = ifp->if_u1.if_ext_irec;
		}
		/* Extents fit in target extent page */
		if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
			if (page_idx < erp->er_extcount) {
				memmove(&erp->er_extbuf[page_idx + ext_diff],
					&erp->er_extbuf[page_idx],
					(erp->er_extcount - page_idx) *
					sizeof(xfs_bmbt_rec_t));
				memset(&erp->er_extbuf[page_idx], 0, byte_diff);
			}
			erp->er_extcount += ext_diff;
			xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
		}
		/* Insert a new extent page */
		else if (erp) {
			xfs_iext_add_indirect_multi(ifp,
				erp_idx, page_idx, ext_diff);
		}
		/*
		 * If extent(s) are being appended to the last page in
		 * the indirection array and the new extent(s) don't fit
		 * in the page, then erp is NULL and erp_idx is set to
		 * the next index needed in the indirection array.
		 */
		else {
			int	count = ext_diff;

			while (count) {
				erp = xfs_iext_irec_new(ifp, erp_idx);
				erp->er_extcount = count;
				count -= MIN(count, (int)XFS_LINEAR_EXTS);
				if (count)
					erp_idx++;
			}
		}
	}
	ifp->if_bytes = new_size;
}
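
/*
 * Note (illustrative, not in the original source): the three branches above
 * give the fork its three storage modes.  Assuming the usual definitions of
 * XFS_INLINE_EXTS (2) and XFS_LINEAR_EXTS (XFS_IEXT_BUFSZ / record size),
 * growing a fork from 2 to 3 extents moves it from the inline buffer to a
 * kmem_alloc'd direct list, and growing past XFS_LINEAR_EXTS switches it to
 * the indirection array handled by xfs_iext_irec_init() and friends.
 */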
/*
 * This is called when incore extents are being added to the indirection
 * array and the new extents do not fit in the target extent list. The
 * erp_idx parameter contains the irec index for the target extent list
 * in the indirection array, and the idx parameter contains the extent
 * index within the list.  The number of extents being added is stored
 * in the count parameter.
 *
 *    |-------|   |-------|
 *    |       |   |       |    idx - number of extents before idx
 *    |  idx  |   | count |
 *    |       |   |       |    count - number of extents being inserted at idx
 *    |-------|   |-------|
 *    | count |   | nex2  |    nex2 - number of extents after idx + count
 *    |-------|   |-------|
 */
void
xfs_iext_add_indirect_multi(
	xfs_ifork_t	*ifp,			/* inode fork pointer */
	int		erp_idx,		/* target extent irec index */
	xfs_extnum_t	idx,			/* index within target list */
	int		count)			/* new extents being added */
{
	int		byte_diff;		/* new bytes being added */
	xfs_ext_irec_t	*erp;			/* pointer to irec entry */
	xfs_extnum_t	ext_diff;		/* number of extents to add */
	xfs_extnum_t	ext_cnt;		/* new extents still needed */
	xfs_extnum_t	nex2;			/* extents after idx + count */
	xfs_bmbt_rec_t	*nex2_ep = NULL;	/* temp list for nex2 extents */
	int		nlists;			/* number of irec's (lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	nex2 = erp->er_extcount - idx;
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/*
	 * Save second part of target extent list
	 * (all extents past idx)
	 */
	if (nex2) {
		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
		nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_NOFS);
		memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
		erp->er_extcount -= nex2;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
		memset(&erp->er_extbuf[idx], 0, byte_diff);
	}

	/*
	 * Add the new extents to the end of the target
	 * list, then allocate new irec record(s) and
	 * extent buffer(s) as needed to store the rest
	 * of the new extents.
	 */
	ext_cnt = count;
	ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
	if (ext_diff) {
		erp->er_extcount += ext_diff;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
		ext_cnt -= ext_diff;
	}
	while (ext_cnt) {
		erp_idx++;
		erp = xfs_iext_irec_new(ifp, erp_idx);
		ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
		erp->er_extcount = ext_diff;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
		ext_cnt -= ext_diff;
	}

	/* Add nex2 extents back to indirection array */
	if (nex2) {
		xfs_extnum_t	ext_avail;
		int		i;

		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
		i = 0;
		/*
		 * If nex2 extents fit in the current page, append
		 * nex2_ep after the new extents.
		 */
		if (nex2 <= ext_avail) {
			i = erp->er_extcount;
		}
		/*
		 * Otherwise, check if space is available in the
		 * next page.
		 */
		else if ((erp_idx < nlists - 1) &&
			 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
			  ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
			erp_idx++;
			erp++;
			/* Create a hole for nex2 extents */
			memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
				erp->er_extcount * sizeof(xfs_bmbt_rec_t));
		}
		/*
		 * Final choice, create a new extent page for
		 * nex2 extents.
		 */
		else {
			erp_idx++;
			erp = xfs_iext_irec_new(ifp, erp_idx);
		}
		memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
		kmem_free(nex2_ep);
		erp->er_extcount += nex2;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
	}
}
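
/*
 * Note (illustrative, not in the original source): as the diagram above
 * shows, an insert into the middle of a full page happens in three steps:
 * the nex2 tail records are parked in a temporary buffer, the new records
 * are appended across as many pages as needed, and the tail is finally
 * placed in whichever page (current, next, or freshly allocated) has room.
 */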
/*
 * This is called when the amount of space required for incore file
 * extents needs to be decreased. The ext_diff parameter stores the
 * number of extents to be removed and the idx parameter contains
 * the extent index where the extents will be removed from.
 *
 * If the amount of space needed has decreased below the linear
 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
 * extent array.  Otherwise, use kmem_realloc() to adjust the
 * size to what is needed.
 */
void
xfs_iext_remove(
	xfs_inode_t	*ip,		/* incore inode pointer */
	xfs_extnum_t	idx,		/* index to begin removing exts */
	int		ext_diff,	/* number of extents to remove */
	int		state)		/* type of extent conversion */
{
	xfs_ifork_t	*ifp = (state & BMAP_ATTRFORK) ? ip->i_afp : &ip->i_df;
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		new_size;	/* size of extents after removal */

	trace_xfs_iext_remove(ip, idx, state, _RET_IP_);

	ASSERT(ext_diff > 0);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);

	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	} else if (ifp->if_flags & XFS_IFEXTIREC) {
		xfs_iext_remove_indirect(ifp, idx, ext_diff);
	} else if (ifp->if_real_bytes) {
		xfs_iext_remove_direct(ifp, idx, ext_diff);
	} else {
		xfs_iext_remove_inline(ifp, idx, ext_diff);
	}
	ifp->if_bytes = new_size;
}
/*
 * This removes ext_diff extents from the inline buffer, beginning
 * at extent index idx.
 */
void
xfs_iext_remove_inline(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing exts */
	int		ext_diff)	/* number of extents to remove */
{
	int		nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	ASSERT(idx < XFS_INLINE_EXTS);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(((nextents - ext_diff) > 0) &&
		(nextents - ext_diff) < XFS_INLINE_EXTS);

	if (idx + ext_diff < nextents) {
		memmove(&ifp->if_u2.if_inline_ext[idx],
			&ifp->if_u2.if_inline_ext[idx + ext_diff],
			(nextents - (idx + ext_diff)) *
			 sizeof(xfs_bmbt_rec_t));
		memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
			0, ext_diff * sizeof(xfs_bmbt_rec_t));
	} else {
		memset(&ifp->if_u2.if_inline_ext[idx], 0,
			ext_diff * sizeof(xfs_bmbt_rec_t));
	}
}
/*
 * This removes ext_diff extents from a linear (direct) extent list,
 * beginning at extent index idx. If the extents are being removed
 * from the end of the list (ie. truncate) then we just need to re-
 * allocate the list to remove the extra space. Otherwise, if the
 * extents are being removed from the middle of the existing extent
 * entries, then we first need to move the extent records beginning
 * at idx + ext_diff up in the list to overwrite the records being
 * removed, then remove the extra space via kmem_realloc.
 */
void
xfs_iext_remove_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing exts */
	int		ext_diff)	/* number of extents to remove */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		new_size;	/* size of extents after removal */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	new_size = ifp->if_bytes -
		(ext_diff * sizeof(xfs_bmbt_rec_t));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (new_size == 0) {
		xfs_iext_destroy(ifp);
		return;
	}
	/* Move extents up in the list (if needed) */
	if (idx + ext_diff < nextents) {
		memmove(&ifp->if_u1.if_extents[idx],
			&ifp->if_u1.if_extents[idx + ext_diff],
			(nextents - (idx + ext_diff)) *
			 sizeof(xfs_bmbt_rec_t));
	}
	memset(&ifp->if_u1.if_extents[nextents - ext_diff],
		0, ext_diff * sizeof(xfs_bmbt_rec_t));
	/*
	 * Reallocate the direct extent list. If the extents
	 * will fit inside the inode then xfs_iext_realloc_direct
	 * will switch from direct to inline extent allocation
	 * mode for us.
	 */
	xfs_iext_realloc_direct(ifp, new_size);
	ifp->if_bytes = new_size;
}
/*
 * This is called when incore extents are being removed from the
 * indirection array and the extents being removed span multiple extent
 * buffers. The idx parameter contains the file extent index where we
 * want to begin removing extents, and the count parameter contains
 * how many extents need to be removed.
 *
 *    |-------|   |-------|
 *    | nex1  |   |       |    nex1 - number of extents before idx
 *    |-------|   | count |
 *    |       |   |       |    count - number of extents being removed at idx
 *    | count |   |-------|
 *    |       |   | nex2  |    nex2 - number of extents after idx + count
 *    |-------|   |-------|
 */
void
xfs_iext_remove_indirect(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	idx,		/* index to begin removing extents */
	int		count)		/* number of extents to remove */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		erp_idx = 0;	/* indirection array index */
	xfs_extnum_t	ext_cnt;	/* extents left to remove */
	xfs_extnum_t	ext_diff;	/* extents to remove in current list */
	xfs_extnum_t	nex1;		/* number of extents before idx */
	xfs_extnum_t	nex2;		/* extents after idx + count */
	int		page_idx = idx;	/* index in target extent list */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
	ASSERT(erp != NULL);
	nex1 = page_idx;
	ext_cnt = count;
	while (ext_cnt) {
		nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
		ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
		/*
		 * Check for deletion of entire list;
		 * xfs_iext_irec_remove() updates extent offsets.
		 */
		if (ext_diff == erp->er_extcount) {
			xfs_iext_irec_remove(ifp, erp_idx);
			ext_cnt -= ext_diff;
			nex1 = 0;
			if (ext_cnt) {
				ASSERT(erp_idx < ifp->if_real_bytes /
					XFS_IEXT_BUFSZ);
				erp = &ifp->if_u1.if_ext_irec[erp_idx];
				nex1 = 0;
				continue;
			} else {
				break;
			}
		}
		/* Move extents up (if needed) */
		if (nex2) {
			memmove(&erp->er_extbuf[nex1],
				&erp->er_extbuf[nex1 + ext_diff],
				nex2 * sizeof(xfs_bmbt_rec_t));
		}
		/* Zero out rest of page */
		memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
			((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
		/* Update remaining counters */
		erp->er_extcount -= ext_diff;
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
		ext_cnt -= ext_diff;
		nex1 = 0;
		erp_idx++;
		erp++;
	}
	ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
	xfs_iext_irec_compact(ifp);
}
/*
 * Create, destroy, or resize a linear (direct) block of extents.
 */
void
xfs_iext_realloc_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new size of extents */
{
	int		rnew_size;	/* real new size of extents */

	rnew_size = new_size;

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
		((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
		 (new_size != ifp->if_real_bytes)));

	/* Free extent records */
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	}
	/* Resize direct extent list and zero any new bytes */
	else if (ifp->if_real_bytes) {
		/* Check if extents will fit inside the inode */
		if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
			xfs_iext_direct_to_inline(ifp, new_size /
				(uint)sizeof(xfs_bmbt_rec_t));
			ifp->if_bytes = new_size;
			return;
		}
		if (!is_power_of_2(new_size)){
			rnew_size = roundup_pow_of_two(new_size);
		}
		if (rnew_size != ifp->if_real_bytes) {
			ifp->if_u1.if_extents =
				kmem_realloc(ifp->if_u1.if_extents,
						rnew_size,
						ifp->if_real_bytes, KM_NOFS);
		}
		if (rnew_size > ifp->if_real_bytes) {
			memset(&ifp->if_u1.if_extents[ifp->if_bytes /
				(uint)sizeof(xfs_bmbt_rec_t)], 0,
				rnew_size - ifp->if_real_bytes);
		}
	}
	/*
	 * Switch from the inline extent buffer to a direct
	 * extent list. Be sure to include the inline extent
	 * bytes in new_size.
	 */
	else {
		new_size += ifp->if_bytes;
		if (!is_power_of_2(new_size)) {
			rnew_size = roundup_pow_of_two(new_size);
		}
		xfs_iext_inline_to_direct(ifp, rnew_size);
	}
	ifp->if_real_bytes = rnew_size;
	ifp->if_bytes = new_size;
}
/*
 * Switch from linear (direct) extent records to inline buffer.
 */
void
xfs_iext_direct_to_inline(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	nextents)	/* number of extents in file */
{
	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
	ASSERT(nextents <= XFS_INLINE_EXTS);
	/*
	 * The inline buffer was zeroed when we switched
	 * from inline to direct extent allocation mode,
	 * so we don't need to clear it here.
	 */
	memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
		nextents * sizeof(xfs_bmbt_rec_t));
	kmem_free(ifp->if_u1.if_extents);
	ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
	ifp->if_real_bytes = 0;
}
/*
 * Switch from inline buffer to linear (direct) extent records.
 * new_size should already be rounded up to the next power of 2
 * by the caller (when appropriate), so use new_size as it is.
 * However, since new_size may be rounded up, we can't update
 * if_bytes here. It is the caller's responsibility to update
 * if_bytes upon return.
 */
void
xfs_iext_inline_to_direct(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* number of extents in file */
{
	ifp->if_u1.if_extents = kmem_alloc(new_size, KM_NOFS);
	memset(ifp->if_u1.if_extents, 0, new_size);
	if (ifp->if_bytes) {
		memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
			ifp->if_bytes);
		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
			sizeof(xfs_bmbt_rec_t));
	}
	ifp->if_real_bytes = new_size;
}
/*
 * Resize an extent indirection array to new_size bytes.
 */
void
xfs_iext_realloc_indirect(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		new_size)	/* new indirection array size */
{
	int		nlists;		/* number of irec's (ex lists) */
	int		size;		/* current indirection array size */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	size = nlists * sizeof(xfs_ext_irec_t);
	ASSERT(ifp->if_real_bytes);
	ASSERT((new_size >= 0) && (new_size != size));
	if (new_size == 0) {
		xfs_iext_destroy(ifp);
	} else {
		ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
			kmem_realloc(ifp->if_u1.if_ext_irec,
				new_size, size, KM_NOFS);
	}
}
/*
 * Switch from indirection array to linear (direct) extent allocations.
 */
void
xfs_iext_indirect_to_direct(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_bmbt_rec_host_t *ep;	/* extent record pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		size;		/* size of file extents */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);
	size = nextents * sizeof(xfs_bmbt_rec_t);

	xfs_iext_irec_compact_pages(ifp);
	ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);

	ep = ifp->if_u1.if_ext_irec->er_extbuf;
	kmem_free(ifp->if_u1.if_ext_irec);
	ifp->if_flags &= ~XFS_IFEXTIREC;
	ifp->if_u1.if_extents = ep;
	ifp->if_bytes = size;
	if (nextents < XFS_LINEAR_EXTS) {
		xfs_iext_realloc_direct(ifp, size);
	}
}
/*
 * Free incore file extents.
 */
void
xfs_iext_destroy(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	if (ifp->if_flags & XFS_IFEXTIREC) {
		int	erp_idx;
		int	nlists;

		nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
			xfs_iext_irec_remove(ifp, erp_idx);
		}
		ifp->if_flags &= ~XFS_IFEXTIREC;
	} else if (ifp->if_real_bytes) {
		kmem_free(ifp->if_u1.if_extents);
	} else if (ifp->if_bytes) {
		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
			sizeof(xfs_bmbt_rec_t));
	}
	ifp->if_u1.if_extents = NULL;
	ifp->if_real_bytes = 0;
	ifp->if_bytes = 0;
}
/*
 * Return a pointer to the extent record for file system block bno.
 */
xfs_bmbt_rec_host_t *			/* pointer to found extent record */
xfs_iext_bno_to_ext(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	xfs_extnum_t	*idxp)		/* index of target extent */
{
	xfs_bmbt_rec_host_t *base;	/* pointer to first extent */
	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
	xfs_bmbt_rec_host_t *ep = NULL;	/* pointer to target extent */
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	int		high;		/* upper boundary in search */
	xfs_extnum_t	idx = 0;	/* index of target extent */
	int		low;		/* lower boundary in search */
	xfs_extnum_t	nextents;	/* number of file extents */
	xfs_fileoff_t	startoff = 0;	/* start offset of extent */

	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	if (nextents == 0) {
		*idxp = 0;
		return NULL;
	}
	low = 0;
	if (ifp->if_flags & XFS_IFEXTIREC) {
		/* Find target extent list */
		int	erp_idx = 0;
		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
		base = erp->er_extbuf;
		high = erp->er_extcount - 1;
	} else {
		base = ifp->if_u1.if_extents;
		high = nextents - 1;
	}
	/* Binary search extent records */
	while (low <= high) {
		idx = (low + high) >> 1;
		ep = base + idx;
		startoff = xfs_bmbt_get_startoff(ep);
		blockcount = xfs_bmbt_get_blockcount(ep);
		if (bno < startoff) {
			high = idx - 1;
		} else if (bno >= startoff + blockcount) {
			low = idx + 1;
		} else {
			/* Convert back to file-based extent index */
			if (ifp->if_flags & XFS_IFEXTIREC) {
				idx += erp->er_extoff;
			}
			*idxp = idx;
			return ep;
		}
	}
	/* Convert back to file-based extent index */
	if (ifp->if_flags & XFS_IFEXTIREC) {
		idx += erp->er_extoff;
	}
	if (bno >= startoff + blockcount) {
		if (++idx == nextents) {
			ep = NULL;
		} else {
			ep = xfs_iext_get_ext(ifp, idx);
		}
	}
	*idxp = idx;
	return ep;
}
/*
 * Return a pointer to the indirection array entry containing the
 * extent record for filesystem block bno. Store the index of the
 * target irec in *erp_idxp.
 */
xfs_ext_irec_t *			/* pointer to found extent record */
xfs_iext_bno_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_fileoff_t	bno,		/* block number to search for */
	int		*erp_idxp)	/* irec index of target ext list */
{
	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of extent irec's (lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
			high = erp_idx - 1;
		} else if (erp_next && bno >=
			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
			low = erp_idx + 1;
		} else {
			break;
		}
	}
	*erp_idxp = erp_idx;
	return erp;
}
/*
 * Return a pointer to the indirection array entry containing the
 * extent record at file extent index *idxp. Store the index of the
 * target irec in *erp_idxp and store the page index of the target
 * extent record in *idxp.
 */
xfs_ext_irec_t *
xfs_iext_idx_to_irec(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	xfs_extnum_t	*idxp,		/* extent index (file -> page) */
	int		*erp_idxp,	/* pointer to target irec */
	int		realloc)	/* new bytes were just added */
{
	xfs_ext_irec_t	*prev;		/* pointer to previous irec */
	xfs_ext_irec_t	*erp = NULL;	/* pointer to current irec */
	int		erp_idx;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */
	int		high;		/* binary search upper limit */
	int		low;		/* binary search lower limit */
	xfs_extnum_t	page_idx = *idxp; /* extent index in target list */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	ASSERT(page_idx >= 0);
	ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t));
	ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc);

	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp_idx = 0;
	low = 0;
	high = nlists - 1;

	/* Binary search extent irec's */
	while (low <= high) {
		erp_idx = (low + high) >> 1;
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		prev = erp_idx > 0 ? erp - 1 : NULL;
		if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
		     realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
			high = erp_idx - 1;
		} else if (page_idx > erp->er_extoff + erp->er_extcount ||
			   (page_idx == erp->er_extoff + erp->er_extcount &&
			    !realloc)) {
			low = erp_idx + 1;
		} else if (page_idx == erp->er_extoff + erp->er_extcount &&
			   erp->er_extcount == XFS_LINEAR_EXTS) {
			ASSERT(realloc);
			page_idx = 0;
			erp_idx++;
			erp = erp_idx < nlists ? erp + 1 : NULL;
			break;
		} else {
			page_idx -= erp->er_extoff;
			break;
		}
	}
	*idxp = page_idx;
	*erp_idxp = erp_idx;
	return(erp);
}
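
/*
 * Note (illustrative, not in the original source): the "realloc" argument
 * biases the binary search when new records are about to be added.  With
 * realloc != 0, an index that falls exactly on a page boundary resolves to
 * the earlier, not-yet-full page so the new records can be appended there;
 * with realloc == 0 the same index resolves to the page that actually
 * contains that extent.
 */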
/*
 * Allocate and initialize an indirection array once the space needed
 * for incore extents increases above XFS_IEXT_BUFSZ.
 */
void
xfs_iext_irec_init(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	xfs_extnum_t	nextents;	/* number of extents in file */

	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	ASSERT(nextents <= XFS_LINEAR_EXTS);

	erp = kmem_alloc(sizeof(xfs_ext_irec_t), KM_NOFS);

	if (nextents == 0) {
		ifp->if_u1.if_extents = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
	} else if (!ifp->if_real_bytes) {
		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
	}
	erp->er_extbuf = ifp->if_u1.if_extents;
	erp->er_extcount = nextents;
	erp->er_extoff = 0;

	ifp->if_flags |= XFS_IFEXTIREC;
	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
	ifp->if_u1.if_ext_irec = erp;

	return;
}
/*
 * Allocate and initialize a new entry in the indirection array.
 */
xfs_ext_irec_t *
xfs_iext_irec_new(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* index for new irec */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;

	/* Resize indirection array */
	xfs_iext_realloc_indirect(ifp, ++nlists *
				  sizeof(xfs_ext_irec_t));
	/*
	 * Move records down in the array so the
	 * new page can use erp_idx.
	 */
	erp = ifp->if_u1.if_ext_irec;
	for (i = nlists - 1; i > erp_idx; i--) {
		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
	}
	ASSERT(i == erp_idx);

	/* Initialize new extent record */
	erp = ifp->if_u1.if_ext_irec;
	erp[erp_idx].er_extbuf = kmem_alloc(XFS_IEXT_BUFSZ, KM_NOFS);
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
	erp[erp_idx].er_extcount = 0;
	erp[erp_idx].er_extoff = erp_idx > 0 ?
		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
	return (&erp[erp_idx]);
}
/*
 * Remove a record from the indirection array.
 */
void
xfs_iext_irec_remove(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx)	/* irec index to remove */
{
	xfs_ext_irec_t	*erp;		/* indirection array pointer */
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	erp = &ifp->if_u1.if_ext_irec[erp_idx];
	if (erp->er_extbuf) {
		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
			-erp->er_extcount);
		kmem_free(erp->er_extbuf);
	}
	/* Compact extent records */
	erp = ifp->if_u1.if_ext_irec;
	for (i = erp_idx; i < nlists - 1; i++) {
		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
	}
	/*
	 * Manually free the last extent record from the indirection
	 * array.  A call to xfs_iext_realloc_indirect() with a size
	 * of zero would result in a call to xfs_iext_destroy() which
	 * would in turn call this function again, creating a nasty
	 * infinite loop.
	 */
	if (--nlists) {
		xfs_iext_realloc_indirect(ifp,
			nlists * sizeof(xfs_ext_irec_t));
	} else {
		kmem_free(ifp->if_u1.if_ext_irec);
	}
	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
}
/*
 * This is called to clean up large amounts of unused memory allocated
 * by the indirection array.  Before compacting anything though, verify
 * that the indirection array is still needed and switch back to the
 * linear extent list (or even the inline buffer) if possible.  The
 * compaction policy is as follows:
 *
 *    Full Compaction: Extents fit into a single page (or inline buffer)
 * Partial Compaction: Extents occupy less than 50% of allocated space
 *      No Compaction: Extents occupy at least 50% of allocated space
 */
void
xfs_iext_irec_compact(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_extnum_t	nextents;	/* number of extents in file */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);

	if (nextents == 0) {
		xfs_iext_destroy(ifp);
	} else if (nextents <= XFS_INLINE_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
		xfs_iext_direct_to_inline(ifp, nextents);
	} else if (nextents <= XFS_LINEAR_EXTS) {
		xfs_iext_indirect_to_direct(ifp);
	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
		xfs_iext_irec_compact_pages(ifp);
	}
}
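
/*
 * Note (illustrative, not in the original source): assuming the usual
 * XFS_IEXT_BUFSZ of 4096 bytes and 16-byte extent records, each extent page
 * holds XFS_LINEAR_EXTS == 256 records.  A fork that shrinks from 1000
 * extents spread over 4 pages down to 300 extents then takes the "Partial
 * Compaction" path above (300 < (4 * 256) / 2) and only merges neighboring
 * pages, while shrinking to 200 extents collapses the whole indirection
 * array back to a single direct list.
 */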
/*
 * Combine extents from neighboring extent pages.
 */
void
xfs_iext_irec_compact_pages(
	xfs_ifork_t	*ifp)		/* inode fork pointer */
{
	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
	int		erp_idx = 0;	/* indirection array index */
	int		nlists;		/* number of irec's (ex lists) */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	while (erp_idx < nlists - 1) {
		erp = &ifp->if_u1.if_ext_irec[erp_idx];
		erp_next = erp + 1;
		if (erp_next->er_extcount <=
		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
			memcpy(&erp->er_extbuf[erp->er_extcount],
				erp_next->er_extbuf, erp_next->er_extcount *
				sizeof(xfs_bmbt_rec_t));
			erp->er_extcount += erp_next->er_extcount;
			/*
			 * Free page before removing extent record
			 * so er_extoffs don't get modified in
			 * xfs_iext_irec_remove.
			 */
			kmem_free(erp_next->er_extbuf);
			erp_next->er_extbuf = NULL;
			xfs_iext_irec_remove(ifp, erp_idx + 1);
			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
		} else {
			erp_idx++;
		}
	}
}
/*
 * This is called to update the er_extoff field in the indirection
 * array when extents have been added or removed from one of the
 * extent lists. erp_idx contains the irec index to begin updating
 * at and ext_diff contains the number of extents that were added
 * or removed.
 */
void
xfs_iext_irec_update_extoffs(
	xfs_ifork_t	*ifp,		/* inode fork pointer */
	int		erp_idx,	/* irec index to update */
	int		ext_diff)	/* number of new extents */
{
	int		i;		/* loop counter */
	int		nlists;		/* number of irec's (ex lists */

	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
	for (i = erp_idx; i < nlists; i++) {
		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
	}
}