/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

#include <trace/events/ext4.h>
/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails
					due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */
static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}
static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}
static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
			EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}
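
/*
 * Illustrative note (not from the original source): struct ext4_extent_tail
 * is a single __le32 checksum, and EXT4_EXTENT_TAIL_OFFSET(eh) points just
 * past the last possible entry slot, so the checksum covers the header and
 * all entry slots. With a 4096-byte block, a 12-byte header and 12-byte
 * entries, 12 + 340 * 12 = 4092 bytes are used by the tree node itself,
 * leaving the trailing 4 bytes of the block to hold the checksum on
 * metadata_csum file systems.
 */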
static int ext4_split_extent(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     struct ext4_map_blocks *map,
			     int split_flag,
			     int flags);

static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags);

static int ext4_find_delayed_extent(struct inode *inode,
				    struct extent_status *newes);
static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}
int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
		     struct inode *inode, struct ext4_ext_path *path)
{
	int err;

	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
				       ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}
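
/*
 * Worked example (illustrative, not from the original source): if the
 * nearest extent in the path maps logical block 100 to physical block
 * 5000 and we ask for a goal for logical block 164, the code above
 * returns 5000 + (164 - 100) = 5064, i.e. the physical block that would
 * keep the file contiguous if the allocator can honour it.
 */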
/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}
static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}
static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}
static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}
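
/*
 * Capacity example (illustrative, not from the original source): both
 * struct ext4_extent and struct ext4_extent_idx are 12 bytes, as is the
 * header, so a 4096-byte tree block holds (4096 - 12) / 12 = 340 entries,
 * while the 60-byte i_data root holds (60 - 12) / 12 = 4. The
 * AGGRESSIVE_TEST caps above shrink these numbers so that tree growth
 * and splits can be exercised with tiny files.
 */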
/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		int num = 0;

		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}
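
/*
 * Worked example (illustrative, not from the original source): with
 * 4096-byte blocks, idxs = (4096 - 12) / 12 = 340. Appending delayed
 * allocation blocks one at a time therefore costs one extra index block
 * every 340 blocks, another one every 340^2 blocks, and so on; a
 * non-contiguous lblock instead falls through to the worst case of
 * ext_depth(inode) + 1 new metadata blocks.
 */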
static inline int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	if (len == 0)
		return 0;
	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}
static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}
static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			"bad header/extent: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)
int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}
static int __ext4_ext_check_block(const char *function, unsigned int line,
				  struct inode *inode,
				  struct ext4_extent_header *eh,
				  int depth,
				  struct buffer_head *bh)
{
	int ret;

	if (buffer_verified(bh))
		return 0;
	ret = ext4_ext_check(inode, eh, depth);
	if (ret)
		return ret;
	set_buffer_verified(bh);
	return ret;
}

#define ext4_ext_check_block(inode, eh, depth, bh)	\
	__ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}
static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}
static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", level,
					le32_to_cpu(idx->ei_block),
					ext4_idx_pblock(idx),
					newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(ex->ee_block),
				ext4_ext_pblock(ex),
				ext4_ext_is_uninitialized(ex),
				ext4_ext_get_actual_len(ex),
				newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}
/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif

}
/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif

}
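
/*
 * Invariant note (illustrative, not from the original source): both binary
 * searches start at FIRST + 1 and finish with l pointing at the first
 * entry whose start block is greater than @block, so l - 1 is always
 * valid and is the last entry with ee_block/ei_block <= block. E.g. for
 * extents starting at 0, 8 and 100, a search for block 50 leaves p_ext
 * on the extent that starts at 8.
 */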
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	return 0;
}
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;
	int ret;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto err;
		}
		if (!bh_uptodate_or_lock(bh)) {
			trace_ext4_ext_load_extent(inode, block,
						path[ppos].p_block);
			ret = bh_submit_read(bh);
			if (ret < 0) {
				put_bh(bh);
				goto err;
			}
		}
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			ret = -EIO;
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		ret = ext4_ext_check_block(inode, eh, i, bh);
		if (ret < 0)
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(ret);
}
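
/*
 * Illustrative note (not from the original source): on success the
 * returned array has one element per tree level. For a depth-1 tree,
 * path[0] describes the root in i_data (p_idx set, p_bh == NULL) and
 * path[1] describes the leaf block (p_bh held, p_ext set by the
 * binsearch above), which is why callers must pair every lookup with
 * ext4_ext_drop_refs() and, if they allocated the path, kfree().
 */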
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug("insert new index %d after: %llu\n", logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug("insert new index %d before: %llu\n", logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug("insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EIO;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;
	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}
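
/*
 * Shape of the operation (illustrative, not from the original source):
 * with depth == 2 and at == 0, depth - at == 2 new blocks are allocated;
 * the first becomes the new leaf that receives everything right of the
 * split point in path[2], the second becomes one intermediate index
 * block (k == depth - at - 1 == 1) receiving the right part of
 * path[1], and finally a single index entry for @border is inserted
 * into the root at path[at] by ext4_ext_insert_index() above.
 */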
/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags,
				 struct ext4_extent *newext)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, NULL,
		newext, &err, flags);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (unlikely(!bh))
		return -ENOMEM;
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data,
		sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate e_max right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	le16_add_cpu(&neh->eh_depth, 1);
	ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}
/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int flags,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, flags, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}
/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
				  depth);
				return -EIO;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}
/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
				 struct ext4_extent **ret_ex)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EIO;
			}
		}
		goto found_extent;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		goto found_extent;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check_block(inode, eh,
					 path->p_depth - depth, bh)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
found_extent:
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	*ret_ex = ex;
	if (bh)
		put_bh(bh);
	return 0;
}
/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext &&
				path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}
/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCKS;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}
/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EIO;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}
int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that both extents are initialized. We don't merge
	 * uninitialized extents so that we can be sure that end_io code has
	 * the extent that was written properly split out and conversion to
	 * initialized is trivial.
	 */
	if (ext4_ext_is_uninitialized(ex1) || ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}
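
/*
 * Example (illustrative, not from the original source): an extent covering
 * logical blocks [100, 108) at physical 5000 merges with one covering
 * [108, 112) at physical 5008, since both adjacency tests above pass.
 * The max_len guard matters because ee_len stores initialized lengths
 * only up to EXT_INIT_MAX_LEN (32768); anything larger would set the
 * top bit, which is reserved to mark uninitialized extents.
 */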
/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}
/*
 * This function does a very simple check to see if we can collapse
 * an extent tree with a single extent tree leaf block into the inode.
 */
static void ext4_ext_try_to_merge_up(handle_t *handle,
				     struct inode *inode,
				     struct ext4_ext_path *path)
{
	size_t s;
	unsigned max_root = ext4_ext_space_root(inode, 0);
	ext4_fsblk_t blk;

	if ((path[0].p_depth != 1) ||
	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
		return;

	/*
	 * We need to modify the block allocation bitmap and the block
	 * group descriptor to release the extent tree block.  If we
	 * can't get the journal credits, give up.
	 */
	if (ext4_journal_extend(handle, 2))
		return;

	/*
	 * Copy the extent data up to the inode
	 */
	blk = ext4_idx_pblock(path[0].p_idx);
	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
		sizeof(struct ext4_extent_idx);
	s += sizeof(struct ext4_extent_header);

	memcpy(path[0].p_hdr, path[1].p_hdr, s);
	path[0].p_depth = 0;
	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
	path[0].p_hdr->eh_max = cpu_to_le16(max_root);

	brelse(path[1].p_bh);
	ext4_free_blocks(handle, inode, NULL, blk, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}
/*
 * This function tries to merge the @ex extent to neighbours in the tree.
 * return 1 if merge left else 0.
 */
static void ext4_ext_try_to_merge(handle_t *handle,
				  struct inode *inode,
				  struct ext4_ext_path *path,
				  struct ext4_extent *ex) {
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		(void) ext4_ext_try_to_merge_right(inode, path, ex);

	ext4_ext_try_to_merge_up(handle, inode, path);
}
/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
					   struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);
	b2 &= ~(sbi->s_cluster_ratio - 1);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCKS)
			goto out;
		b2 &= ~(sbi->s_cluster_ratio - 1);
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCKS - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
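
/*
 * Example (illustrative, not from the original source): on a bigalloc
 * file system with s_cluster_ratio == 16, b2 is rounded down to a
 * cluster boundary, so a new extent starting at block 30 with length 40
 * that runs into an existing extent starting at block 70 (cluster start
 * 64) gets its length trimmed to 64 - 30 = 34 and the function
 * returns 1.
 */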
/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int flag)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;
	int flags = 0;

	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EIO;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}

	/* try to insert block into found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)) {

		/*
		 * Try to see whether we should rather test the extent on
		 * right from ex, or from the left of ex. This is because
		 * ext4_ext_find_extent() can return either extent on the
		 * left, or on the right from the searched position. This
		 * will make merging more effective.
		 */
		if (ex < EXT_LAST_EXTENT(eh) &&
		    (le32_to_cpu(ex->ee_block) +
		    ext4_ext_get_actual_len(ex) <
		    le32_to_cpu(newext->ee_block))) {
			ex += 1;
			goto prepend;
		} else if ((ex > EXT_FIRST_EXTENT(eh)) &&
			   (le32_to_cpu(newext->ee_block) +
			   ext4_ext_get_actual_len(newext) <
			   le32_to_cpu(ex->ee_block)))
			ex -= 1;

		/* Try to append newex to the ex */
		if (ext4_can_extents_be_merged(inode, ex, newext)) {
			ext_debug("append [%d]%d block to %u:[%d]%d"
				  "(from %llu)\n",
				  ext4_ext_is_uninitialized(newext),
				  ext4_ext_get_actual_len(newext),
				  le32_to_cpu(ex->ee_block),
				  ext4_ext_is_uninitialized(ex),
				  ext4_ext_get_actual_len(ex),
				  ext4_ext_pblock(ex));
			err = ext4_ext_get_access(handle, inode,
						  path + depth);
			if (err)
				return err;

			/*
			 * ext4_can_extents_be_merged should have checked
			 * that either both extents are uninitialized, or
			 * both aren't. Thus we need to check only one of
			 * them here.
			 */
			if (ext4_ext_is_uninitialized(ex))
				uninitialized = 1;
			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
			if (uninitialized)
				ext4_ext_mark_uninitialized(ex);
			eh = path[depth].p_hdr;
			nearex = ex;
			goto merge;
		}

prepend:
		/* Try to prepend newex to the ex */
		if (ext4_can_extents_be_merged(inode, newext, ex)) {
			ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
				  "(from %llu)\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_is_uninitialized(newext),
				  ext4_ext_get_actual_len(newext),
				  le32_to_cpu(ex->ee_block),
				  ext4_ext_is_uninitialized(ex),
				  ext4_ext_get_actual_len(ex),
				  ext4_ext_pblock(ex));
			err = ext4_ext_get_access(handle, inode,
						  path + depth);
			if (err)
				return err;

			/*
			 * ext4_can_extents_be_merged should have checked
			 * that either both extents are uninitialized, or
			 * both aren't. Thus we need to check only one of
			 * them here.
			 */
			if (ext4_ext_is_uninitialized(ex))
				uninitialized = 1;
			ex->ee_block = newext->ee_block;
			ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
			if (uninitialized)
				ext4_ext_mark_uninitialized(ex);
			eh = path[depth].p_hdr;
			nearex = ex;
			goto merge;
		}
	}

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = EXT_MAX_BLOCKS;
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
		next = ext4_ext_next_leaf_block(path);
	if (next != EXT_MAX_BLOCKS) {
		ext_debug("next leaf block - %u\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto has_space;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	if (flag & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		flags = EXT4_MB_USE_RESERVED;
	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
				le32_to_cpu(newext->ee_block),
				ext4_ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext));
		nearex = EXT_FIRST_EXTENT(eh);
	} else {
		if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
			/* Insert after */
			ext_debug("insert %u:%llu:[%d]%d before: "
					"nearest %p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					nearex);
			nearex++;
		} else {
			/* Insert before */
			BUG_ON(newext->ee_block == nearex->ee_block);
			ext_debug("insert %u:%llu:[%d]%d after: "
					"nearest %p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					nearex);
		}
		len = EXT_LAST_EXTENT(eh) - nearex + 1;
		if (len > 0) {
			ext_debug("insert %u:%llu:[%d]%d: "
					"move %d extents from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					len, nearex, nearex + 1);
			memmove(nearex + 1, nearex,
				len * sizeof(struct ext4_extent));
		}
	}

	le16_add_cpu(&eh->eh_entries, 1);
	path[depth].p_ext = nearex;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents */
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(handle, inode, path, nearex);

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + path->p_depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	return err;
}
static int ext4_fill_fiemap_extents(struct inode *inode,
				    ext4_lblk_t block, ext4_lblk_t num,
				    struct fiemap_extent_info *fieinfo)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent *ex;
	struct extent_status es;
	ext4_lblk_t next, next_del, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int exists, depth = 0, err = 0;
	unsigned int flags = 0;
	unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;

	while (block < last && block != EXT_MAX_BLOCKS) {
		num = last - block;
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);

		if (path && ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		path = ext4_ext_find_extent(inode, block, path);
		if (IS_ERR(path)) {
			up_read(&EXT4_I(inode)->i_data_sem);
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		if (unlikely(path[depth].p_hdr == NULL)) {
			up_read(&EXT4_I(inode)->i_data_sem);
			EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
			err = -EIO;
			break;
		}
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);
		ext4_ext_drop_refs(path);

		flags = 0;
		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			es.es_lblk = start;
			es.es_len = end - start;
			es.es_pblk = 0;
		} else {
			es.es_lblk = le32_to_cpu(ex->ee_block);
			es.es_len = ext4_ext_get_actual_len(ex);
			es.es_pblk = ext4_ext_pblock(ex);
			if (ext4_ext_is_uninitialized(ex))
				flags |= FIEMAP_EXTENT_UNWRITTEN;
		}

		/*
		 * Find delayed extent and update es accordingly. We call
		 * it even in !exists case to find out whether es is the
		 * last existing extent or not.
		 */
		next_del = ext4_find_delayed_extent(inode, &es);
		if (!exists && next_del) {
			exists = 1;
			flags |= (FIEMAP_EXTENT_DELALLOC |
				  FIEMAP_EXTENT_UNKNOWN);
		}
		up_read(&EXT4_I(inode)->i_data_sem);

		if (unlikely(es.es_len == 0)) {
			EXT4_ERROR_INODE(inode, "es.es_len == 0");
			err = -EIO;
			break;
		}

		/*
		 * This is possible iff next == next_del == EXT_MAX_BLOCKS.
		 * we need to check next == EXT_MAX_BLOCKS because it is
		 * possible that an extent is with unwritten and delayed
		 * status due to when an extent is delayed allocated and
		 * is allocated by fallocate status tree will track both of
		 * them in one extent.
		 *
		 * So we could return an unwritten and delayed extent, and
		 * its block is equal to 'next'.
		 */
		if (next == next_del && next == EXT_MAX_BLOCKS) {
			flags |= FIEMAP_EXTENT_LAST;
			if (unlikely(next_del != EXT_MAX_BLOCKS ||
				     next != EXT_MAX_BLOCKS)) {
				EXT4_ERROR_INODE(inode,
						 "next extent == %u, next "
						 "delalloc extent = %u",
						 next, next_del);
				err = -EIO;
				break;
			}
		}

		if (exists) {
			err = fiemap_fill_next_extent(fieinfo,
				(__u64)es.es_lblk << blksize_bits,
				(__u64)es.es_pblk << blksize_bits,
				(__u64)es.es_len << blksize_bits,
				flags);
			if (err < 0)
				break;
			if (err == 1) {
				err = 0;
				break;
			}
		}

		block = es.es_lblk + es.es_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}
/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				ext4_lblk_t block)
{
	int depth = ext_depth(inode);
	unsigned long len;
	ext4_lblk_t lblock;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/*
		 * there is no extent yet, so gap is [0;-] and we
		 * don't cache it
		 */
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
				block,
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex));
		if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
			ext4_es_insert_extent(inode, lblock, len, ~0,
					      EXTENT_STATUS_HOLE);
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);

		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
				block);
		BUG_ON(next == lblock);
		len = next - lblock;
		if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
			ext4_es_insert_extent(inode, lblock, len, ~0,
					      EXTENT_STATUS_HOLE);
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %u:%lu\n", lblock, len);
}
/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path, int depth)
{
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	depth--;
	path = path + depth;
	leaf = ext4_idx_pblock(path->p_idx);
	if (unlikely(path->p_hdr->eh_entries == 0)) {
		EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
		return -EIO;
	}
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;

	if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
		int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
		len *= sizeof(struct ext4_extent_idx);
		memmove(path->p_idx, path->p_idx + 1, len);
	}

	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	trace_ext4_ext_rm_idx(inode, leaf);

	ext4_free_blocks(handle, inode, NULL, leaf, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);

	while (--depth >= 0) {
		if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
			break;
		path--;
		err = ext4_ext_get_access(handle, inode, path);
		if (err)
			break;
		path->p_idx->ei_block = (path+1)->p_idx->ei_block;
		err = ext4_ext_dirty(handle, inode, path);
		if (err)
			break;
	}
	return err;
}
/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the max. credits needed to insert an extent
 * to the extent tree.
 * When passing the actual path, the caller should calculate credits
 * under i_data_sem.
 */
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
						struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 *  There is some space in the leaf tree, no
			 *  need to account for leaf block credit
			 *
			 *  bitmaps and block group descriptor blocks
			 *  and other metadata blocks still need to be
			 *  accounted.
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}
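
/*
 * Illustrative note (not from the original source): when the leaf still
 * has room, only the leaf itself plus the usual bookkeeping metadata
 * (one bitmap and one group descriptor, plus EXT4_META_TRANS_BLOCKS for
 * the superblock, inode and similar blocks) must be journalled;
 * otherwise the estimate falls back to ext4_chunk_trans_blocks(), which
 * allows for a possible split at every level of the tree.
 */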
/*
 * How many index/leaf blocks need to change/allocate to add @extents extents?
 *
 * If we add a single extent, then in the worst case, each tree level
 * index/leaf need to be changed in case of the tree split.
 *
 * If more extents are inserted, they could cause the whole tree split more
 * than once, but this is really rare.
 */
int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
{
	int index;
	int depth;

	/* If we are converting the inline data, only one is needed here. */
	if (ext4_has_inline_data(inode))
		return 1;

	depth = ext_depth(inode);

	if (extents <= 1)
		index = depth * 2;
	else
		index = depth * 3;

	return index;
}
static inline int get_default_free_blocks_flags(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		return EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
	else if (ext4_should_journal_data(inode))
		return EXT4_FREE_BLOCKS_FORGET;
	return 0;
}
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
			      struct ext4_extent *ex,
			      long long *partial_cluster,
			      ext4_lblk_t from, ext4_lblk_t to)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	ext4_fsblk_t pblk;
	int flags = get_default_free_blocks_flags(inode);

	/*
	 * For bigalloc file systems, we never free a partial cluster
	 * at the beginning of the extent.  Instead, we make a note
	 * that we tried freeing the cluster, and check to see if we
	 * need to free it on a subsequent call to ext4_remove_blocks,
	 * or at the end of the ext4_truncate() operation.
	 */
	flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;

	trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
	/*
	 * If we have a partial cluster, and it's different from the
	 * cluster of the last block, we need to explicitly free the
	 * partial cluster here.
	 */
	pblk = ext4_ext_pblock(ex) + ee_len - 1;
	if ((*partial_cluster > 0) &&
	    (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
		ext4_free_blocks(handle, inode, NULL,
				 EXT4_C2B(sbi, *partial_cluster),
				 sbi->s_cluster_ratio, flags);
		*partial_cluster = 0;
	}

#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		ext4_lblk_t num;
		unsigned int unaligned;

		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		pblk = ext4_ext_pblock(ex) + ee_len - num;
		/*
		 * Usually we want to free the partial cluster at the end of
		 * the extent, except for the situation when the cluster is
		 * still used by any other extent (partial_cluster is
		 * negative).
		 */
		if (*partial_cluster < 0 &&
		    -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1))
			flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;

		ext_debug("free last %u blocks starting %llu partial %lld\n",
			  num, pblk, *partial_cluster);
		ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
		/*
		 * If the block range to be freed didn't start at the
		 * beginning of a cluster, and we removed the entire
		 * extent and the cluster is not used by any other extent,
		 * save the partial cluster here, since we might need to
		 * delete it if we determine that the truncate operation
		 * has removed all of the blocks in the cluster.
		 *
		 * On the other hand, if we did not manage to free the whole
		 * extent, we have to mark the cluster as used (store negative
		 * cluster number in partial_cluster).
		 */
		unaligned = pblk & (sbi->s_cluster_ratio - 1);
		if (unaligned && (ee_len == num) &&
		    (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
			*partial_cluster = EXT4_B2C(sbi, pblk);
		else if (unaligned)
			*partial_cluster = -((long long)EXT4_B2C(sbi, pblk));
		else if (*partial_cluster > 0)
			*partial_cluster = 0;
	} else
		ext4_error(sbi->s_sb, "strange request: removal(2) "
			   "%u-%u from %u:%u\n",
			   from, to, le32_to_cpu(ex->ee_block), ee_len);
	return 0;
}
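
/*
 * Worked example of the partial_cluster convention (illustrative, not
 * from the original source), assuming s_cluster_ratio == 4:
 *
 *	pblk = 110  =>  EXT4_B2C(sbi, 110) == 27
 *
 * If the freed range ends inside cluster 27 and nothing else uses it,
 * *partial_cluster becomes +27 ("maybe free this later"); if part of
 * the extent survives in that cluster, it becomes -27 ("in use, never
 * free"); a later call that frees blocks in a different cluster then
 * flushes a pending positive value via ext4_free_blocks().
 */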
/*
 * ext4_ext_rm_leaf() Removes the extents associated with the
 * blocks appearing between "start" and "end", and splits the extents
 * if "start" and "end" appear in the same extent
 *
 * @handle: The journal handle
 * @inode:  The file's inode
 * @path:   The path to the leaf
 * @partial_cluster: The cluster which we'll have to free once all extents
 *                   have been released from it. It is negative if the
 *                   cluster is still in use.
 * @start:  The first block to remove
 * @end:    The last block to remove
 */
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		 struct ext4_ext_path *path,
		 long long *partial_cluster,
		 ext4_lblk_t start, ext4_lblk_t end)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	ext4_lblk_t a, b;
	unsigned num;
	ext4_lblk_t ex_ee_block;
	unsigned short ex_ee_len;
	unsigned uninitialized = 0;
	struct ext4_extent *ex;
	ext4_fsblk_t pblk;

	/* the header must be checked already in ext4_ext_remove_space() */
	ext_debug("truncate since %u in leaf to %u\n", start, end);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}
	/* find where to start removing */
	ex = path[depth].p_ext;
	if (!ex)
		ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = ext4_ext_get_actual_len(ex);

	trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
			ex_ee_block + ex_ee_len > start) {

		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		else
			uninitialized = 0;

		ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
			  uninitialized, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block+ex_ee_len - 1 < end ?
			ex_ee_block+ex_ee_len - 1 : end;

		ext_debug("  border %u:%u\n", a, b);

		/* If this extent is beyond the end of the hole, skip it */
		if (end < ex_ee_block) {
			/*
			 * We're going to skip this extent and move to another,
			 * so if this extent is not cluster aligned we have
			 * to mark the current cluster as used to avoid
			 * accidentally freeing it later on
			 */
			pblk = ext4_ext_pblock(ex);
			if (pblk & (sbi->s_cluster_ratio - 1))
				*partial_cluster =
					-((long long)EXT4_B2C(sbi, pblk));
			ex--;
			ex_ee_block = le32_to_cpu(ex->ee_block);
			ex_ee_len = ext4_ext_get_actual_len(ex);
			continue;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			EXT4_ERROR_INODE(inode,
					 "can not handle truncate %u:%u "
					 "of extent %u:%u",
					 start, end, ex_ee_block,
					 ex_ee_block + ex_ee_len - 1);
			err = -EIO;
			goto out;
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			num = a - ex_ee_block;
		} else {
			/* remove whole extent: excellent! */
			num = 0;
		}
		/*
		 * 3 for leaf, sb, and inode plus 2 (bmap and group
		 * descriptor) for each block group; assume two block
		 * groups plus ex_ee_len/blocks_per_block_group for
		 * the worst case
		 */
		credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
		credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

		err = ext4_ext_truncate_extend_restart(handle, inode, credits);
		if (err)
			goto out;

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
					 a, b);
		if (err)
			goto out;

		if (num == 0)
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);

		ex->ee_len = cpu_to_le16(num);
		/*
		 * Do not mark uninitialized if all the blocks in the
		 * extent have been removed.
		 */
		if (uninitialized && num)
			ext4_ext_mark_uninitialized(ex);
		/*
		 * If the extent was completely released,
		 * we need to remove it from the leaf
		 */
		if (num == 0) {
			if (end != EXT_MAX_BLOCKS - 1) {
				/*
				 * For hole punching, we need to scoot all the
				 * extents up when an extent is removed so that
				 * we don't have blank extents in the middle
				 */
				memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
					sizeof(struct ext4_extent));

				/* Now get rid of the one at the end */
				memset(EXT_LAST_EXTENT(eh), 0,
					sizeof(struct ext4_extent));
			}
			le16_add_cpu(&eh->eh_entries, -1);
		} else if (*partial_cluster > 0)
			*partial_cluster = 0;

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
				ext4_ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = ext4_ext_get_actual_len(ex);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/*
	 * Free the partial cluster only if the current extent does not
	 * reference it. Otherwise we might free a used cluster.
	 */
	if (*partial_cluster > 0 &&
	    (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
	     *partial_cluster)) {
		int flags = get_default_free_blocks_flags(inode);

		ext4_free_blocks(handle, inode, NULL,
				 EXT4_C2B(sbi, *partial_cluster),
				 sbi->s_cluster_ratio, flags);
		*partial_cluster = 0;
	}

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path, depth);

out:
	return err;
}
/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
static int
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}
int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
			  ext4_lblk_t end)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path = NULL;
	long long partial_cluster = 0;
	handle_t *handle;
	int i = 0, err = 0;

	ext_debug("truncate since %u to %u\n", start, end);

	/* probably first extent we're gonna free will be last in block */
	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

again:
	trace_ext4_ext_remove_space(inode, start, end, depth);

	/*
	 * Check if we are removing extents inside the extent tree. If that
	 * is the case, we are going to punch a hole inside the extent tree
	 * so we have to check whether we need to split the extent covering
	 * the last block to remove so we can easily remove the part of it
	 * in ext4_ext_rm_leaf().
	 */
	if (end < EXT_MAX_BLOCKS - 1) {
		struct ext4_extent *ex;
		ext4_lblk_t ee_block;

		/* find extent for this block */
		path = ext4_ext_find_extent(inode, end, NULL);
		if (IS_ERR(path)) {
			ext4_journal_stop(handle);
			return PTR_ERR(path);
		}
		depth = ext_depth(inode);
		/* A leaf may not exist only if the inode has no blocks at all */
		ex = path[depth].p_ext;
		if (!ex) {
			if (depth) {
				EXT4_ERROR_INODE(inode,
						 "path[%d].p_hdr == NULL",
						 depth);
				err = -EIO;
			}
			goto out;
		}

		ee_block = le32_to_cpu(ex->ee_block);

		/*
		 * See if the last block is inside the extent, if so split
		 * the extent at 'end' block so we can easily remove the
		 * tail of the first part of the split extent in
		 * ext4_ext_rm_leaf().
		 */
		if (end >= ee_block &&
		    end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
			int split_flag = 0;

			if (ext4_ext_is_uninitialized(ex))
				split_flag = EXT4_EXT_MARK_UNINIT1 |
					     EXT4_EXT_MARK_UNINIT2;

			/*
			 * Split the extent in two so that 'end' is the last
			 * block in the first new extent. Also we should not
			 * fail removing space due to ENOSPC so try to use a
			 * reserved block if that happens.
			 */
			err = ext4_split_extent_at(handle, inode, path,
					end + 1, split_flag,
					EXT4_GET_BLOCKS_PRE_IO |
					EXT4_GET_BLOCKS_METADATA_NOFAIL);

			if (err < 0)
				goto out;
		}
	}
	/*
	 * We start scanning from the right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	depth = ext_depth(inode);
	if (path) {
		int k = i = depth;
		while (--k > 0)
			path[k].p_block =
				le16_to_cpu(path[k].p_hdr->eh_entries)+1;
	} else {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
			       GFP_NOFS);
		if (path == NULL) {
			ext4_journal_stop(handle);
			return -ENOMEM;
		}
		path[0].p_depth = depth;
		path[0].p_hdr = ext_inode_hdr(inode);
		i = 0;

		if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
			err = -EIO;
			goto out;
		}
	}
	err = 0;

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path,
					       &partial_cluster, start,
					       end);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
		}

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
				  path[i].p_hdr,
				  le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, see at next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			struct buffer_head *bh;
			/* go to the next level */
			ext_debug("move to level %d (block %llu)\n",
				  i + 1, ext4_idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
			if (!bh) {
				/* should we reset i_size? */
				err = -EIO;
				break;
			}
			if (WARN_ON(i + 1 > depth)) {
				err = -EIO;
				break;
			}
			if (ext4_ext_check_block(inode, ext_block_hdr(bh),
							depth - i - 1, bh)) {
				err = -EIO;
				break;
			}
			path[i + 1].p_bh = bh;

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must be already prepared by the
				 * truncatei_leaf() */
				err = ext4_ext_rm_idx(handle, inode, path, i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	trace_ext4_ext_remove_space_done(inode, start, end, depth,
			partial_cluster, path->p_hdr->eh_entries);

	/* If we still have something in the partial cluster and we have removed
	 * even the first extent, then we should free the blocks in the partial
	 * cluster as well. */
	if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) {
		int flags = get_default_free_blocks_flags(inode);

		ext4_free_blocks(handle, inode, NULL,
				 EXT4_C2B(EXT4_SB(sb), partial_cluster),
				 EXT4_SB(sb)->s_cluster_ratio, flags);
		partial_cluster = 0;
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode, 0));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_drop_refs(path);
	kfree(path);
	if (err == -EAGAIN) {
		path = NULL;
		goto again;
	}
	ext4_journal_stop(handle);

	return err;
}
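
/*
 * Usage sketch (illustrative, not from the original source): truncate
 * rounds i_size up to the first wholly-truncated logical block and
 * removes everything after it, roughly:
 *
 *	last_block = (inode->i_size + sb->s_blocksize - 1)
 *			>> EXT4_BLOCK_SIZE_BITS(sb);
 *	err = ext4_ext_remove_space(inode, last_block,
 *				    EXT_MAX_BLOCKS - 1);
 *
 * while punch-hole passes the first and last block of the hole, which
 * is what triggers the extent split at 'end + 1' above.
 */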
/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
#if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
		printk(KERN_INFO "EXT4-fs: file extents enabled"
#ifdef AGGRESSIVE_TEST
		       ", aggressive tests"
#endif
#ifdef CHECK_BINSEARCH
		       ", check binsearch"
#endif
#ifdef EXTENTS_STATS
		       ", stats"
#endif
		       "\n");
#endif
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}
/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}
/* FIXME!! we need to try to merge to left or right after zero-out  */
static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
{
	ext4_fsblk_t ee_pblock;
	unsigned int ee_len;
	int ret;

	ee_len    = ext4_ext_get_actual_len(ex);
	ee_pblock = ext4_ext_pblock(ex);

	ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
	if (ret > 0)
		ret = 0;

	return ret;
}
/*
 * ext4_split_extent_at() splits an extent at the given block.
 *
 * @handle: the journal handle
 * @inode: the file inode
 * @path: the path to the extent
 * @split: the logical block where the extent is split.
 * @split_flags: indicates if the extent can be zeroed out if the split
 *		 fails, and the states (init or uninit) of the new extents.
 * @flags: flags used to insert the new extent into the extent tree.
 *
 *
 * Splits extent [a, b] into two extents [a, @split) and [@split, b], states
 * of which are determined by split_flag.
 *
 * There are two cases:
 *  a> the extent is split into two extents.
 *  b> no split is needed, and we just mark the extent.
 *
 * return 0 on success.
 */
static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags)
{
	ext4_fsblk_t newblock;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex, newex, orig_ex, zero_ex;
	struct ext4_extent *ex2 = NULL;
	unsigned int ee_len, depth;
	int err = 0;

	BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
	       (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));

	ext_debug("ext4_split_extents_at: inode %lu, logical"
		"block %llu\n", inode->i_ino, (unsigned long long)split);

	ext4_ext_show_leaf(inode, path);

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	newblock = split - ee_block + ext4_ext_pblock(ex);

	BUG_ON(split < ee_block || split >= (ee_block + ee_len));
	BUG_ON(!ext4_ext_is_uninitialized(ex) &&
	       split_flag & (EXT4_EXT_MAY_ZEROOUT |
			     EXT4_EXT_MARK_UNINIT1 |
			     EXT4_EXT_MARK_UNINIT2));

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;

	if (split == ee_block) {
		/*
		 * case b: block @split is the block that the extent begins
		 * with, so we just change the state of the extent, and
		 * splitting is not needed.
		 */
		if (split_flag & EXT4_EXT_MARK_UNINIT2)
			ext4_ext_mark_uninitialized(ex);
		else
			ext4_ext_mark_initialized(ex);

		if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
			ext4_ext_try_to_merge(handle, inode, path, ex);

		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
		goto out;
	}

	/* case a */
	memcpy(&orig_ex, ex, sizeof(orig_ex));
	ex->ee_len = cpu_to_le16(split - ee_block);
	if (split_flag & EXT4_EXT_MARK_UNINIT1)
		ext4_ext_mark_uninitialized(ex);

	/*
	 * path may lead to new leaf, not to original leaf any more
	 * after ext4_ext_insert_extent() returns,
	 */
	err = ext4_ext_dirty(handle, inode, path + depth);
	if (err)
		goto fix_extent_len;

	ex2 = &newex;
	ex2->ee_block = cpu_to_le32(split);
	ex2->ee_len   = cpu_to_le16(ee_len - (split - ee_block));
	ext4_ext_store_pblock(ex2, newblock);
	if (split_flag & EXT4_EXT_MARK_UNINIT2)
		ext4_ext_mark_uninitialized(ex2);

	err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
			if (split_flag & EXT4_EXT_DATA_VALID1) {
				err = ext4_ext_zeroout(inode, ex2);
				zero_ex.ee_block = ex2->ee_block;
				zero_ex.ee_len = cpu_to_le16(
						ext4_ext_get_actual_len(ex2));
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex2));
			} else {
				err = ext4_ext_zeroout(inode, ex);
				zero_ex.ee_block = ex->ee_block;
				zero_ex.ee_len = cpu_to_le16(
						ext4_ext_get_actual_len(ex));
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex));
			}
		} else {
			err = ext4_ext_zeroout(inode, &orig_ex);
			zero_ex.ee_block = orig_ex.ee_block;
			zero_ex.ee_len = cpu_to_le16(
					ext4_ext_get_actual_len(&orig_ex));
			ext4_ext_store_pblock(&zero_ex,
					      ext4_ext_pblock(&orig_ex));
		}

		if (err)
			goto fix_extent_len;
		/* update the extent length and mark as initialized */
		ex->ee_len = cpu_to_le16(ee_len);
		ext4_ext_try_to_merge(handle, inode, path, ex);
		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
		if (err)
			goto fix_extent_len;

		/* update extent status tree */
		err = ext4_es_zeroout(inode, &zero_ex);

		goto out;
	} else if (err)
		goto fix_extent_len;

out:
	ext4_ext_show_leaf(inode, path);
	return err;

fix_extent_len:
	ex->ee_len = orig_ex.ee_len;
	ext4_ext_dirty(handle, inode, path + depth);
	return err;
}
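
/*
 * Worked example (illustrative, not from the original source): an
 * extent with ee_block == 100, ee_len == 8, pblk == 5000, split at
 * logical block 103, yields:
 *
 *	first half:  [100..102] at pblk 5000 (ee_len = split - ee_block = 3)
 *	second half: [103..107] at pblk 5003 (newblock = split - ee_block + pblk)
 *
 * If inserting the second half hits ENOSPC and EXT4_EXT_MAY_ZEROOUT is
 * set, the affected half is zeroed instead and the original length is
 * restored, so no split is left half-done on disk.
 */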
/*
 * ext4_split_extents() splits an extent and marks the extent which is covered
 * by @map as split_flags indicates.
 *
 * It may result in splitting the extent into multiple extents (up to three)
 * There are three possibilities:
 *   a> There is no split required
 *   b> Splits in two extents: Split is happening at either end of the extent
 *   c> Splits in three extents: Someone is splitting in the middle of the
 *      extent
 *
 */
static int ext4_split_extent(handle_t *handle,
			      struct inode *inode,
			      struct ext4_ext_path *path,
			      struct ext4_map_blocks *map,
			      int split_flag,
			      int flags)
{
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len, depth;
	int err = 0;
	int uninitialized;
	int split_flag1, flags1;
	int allocated = map->m_len;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	uninitialized = ext4_ext_is_uninitialized(ex);

	if (map->m_lblk + map->m_len < ee_block + ee_len) {
		split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
		flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
		if (uninitialized)
			split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
				       EXT4_EXT_MARK_UNINIT2;
		if (split_flag & EXT4_EXT_DATA_VALID2)
			split_flag1 |= EXT4_EXT_DATA_VALID1;
		err = ext4_split_extent_at(handle, inode, path,
				map->m_lblk + map->m_len, split_flag1, flags1);
		if (err)
			goto out;
	} else {
		allocated = ee_len - (map->m_lblk - ee_block);
	}
	/*
	 * Updating the path is required because the previous
	 * ext4_split_extent_at() may result in a split of the original leaf
	 * or an extent zeroout.
	 */
	ext4_ext_drop_refs(path);
	path = ext4_ext_find_extent(inode, map->m_lblk, path);
	if (IS_ERR(path))
		return PTR_ERR(path);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	uninitialized = ext4_ext_is_uninitialized(ex);
	split_flag1 = 0;

	if (map->m_lblk >= ee_block) {
		split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
		if (uninitialized) {
			split_flag1 |= EXT4_EXT_MARK_UNINIT1;
			split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
						     EXT4_EXT_MARK_UNINIT2);
		}
		err = ext4_split_extent_at(handle, inode, path,
				map->m_lblk, split_flag1, flags);
		if (err)
			goto out;
	}

	ext4_ext_show_leaf(inode, path);
out:
	return err ? err : allocated;
}
/*
 * This function is called by ext4_ext_map_blocks() if someone tries to write
 * to an uninitialized extent. It may result in splitting the uninitialized
 * extent into multiple extents (up to three - one initialized and two
 * uninitialized).
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be initialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the
 *      extent
 *
 * Pre-conditions:
 *  - The extent pointed to by 'path' is uninitialized.
 *  - The extent pointed to by 'path' contains a superset
 *    of the logical span [map->m_lblk, map->m_lblk + map->m_len).
 *
 * Post-conditions on success:
 *  - the returned value is the number of blocks beyond map->m_lblk
 *    that are allocated and initialized.
 *    It is guaranteed to be >= map->m_len.
 */
static int ext4_ext_convert_to_initialized(handle_t *handle,
					   struct inode *inode,
					   struct ext4_map_blocks *map,
					   struct ext4_ext_path *path,
					   int flags)
{
	struct ext4_sb_info *sbi;
	struct ext4_extent_header *eh;
	struct ext4_map_blocks split_map;
	struct ext4_extent zero_ex;
	struct ext4_extent *ex, *abut_ex;
	ext4_lblk_t ee_block, eof_block;
	unsigned int ee_len, depth, map_len = map->m_len;
	int allocated = 0, max_zeroout = 0;
	int err = 0;
	int split_flag = 0;

	ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)map->m_lblk, map_len);

	sbi = EXT4_SB(inode->i_sb);
	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map_len)
		eof_block = map->m_lblk + map_len;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);
	zero_ex.ee_len = 0;

	trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);

	/* Pre-conditions */
	BUG_ON(!ext4_ext_is_uninitialized(ex));
	BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));

	/*
	 * Attempt to transfer newly initialized blocks from the currently
	 * uninitialized extent to its neighbor. This is much cheaper
	 * than an insertion followed by a merge as those involve costly
	 * memmove() calls. Transferring to the left is the common case in
	 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
	 * followed by append writes.
	 *
	 * Limitations of the current logic:
	 *  - L1: we do not deal with writes covering the whole extent.
	 *    This would require removing the extent if the transfer
	 *    is possible.
	 *  - L2: we only attempt to merge with an extent stored in the
	 *    same extent tree node.
	 */
	if ((map->m_lblk == ee_block) &&
		/* See if we can merge left */
		(map_len < ee_len) &&		/*L1*/
		(ex > EXT_FIRST_EXTENT(eh))) {	/*L2*/
		ext4_lblk_t prev_lblk;
		ext4_fsblk_t prev_pblk, ee_pblk;
		unsigned int prev_len;

		abut_ex = ex - 1;
		prev_lblk = le32_to_cpu(abut_ex->ee_block);
		prev_len = ext4_ext_get_actual_len(abut_ex);
		prev_pblk = ext4_ext_pblock(abut_ex);
		ee_pblk = ext4_ext_pblock(ex);

		/*
		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
		 * upon those conditions:
		 * - C1: abut_ex is initialized,
		 * - C2: abut_ex is logically abutting ex,
		 * - C3: abut_ex is physically abutting ex,
		 * - C4: abut_ex can receive the additional blocks without
		 *   overflowing the (initialized) length limit.
		 */
		if ((!ext4_ext_is_uninitialized(abut_ex)) &&		/*C1*/
			((prev_lblk + prev_len) == ee_block) &&		/*C2*/
			((prev_pblk + prev_len) == ee_pblk) &&		/*C3*/
			(prev_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
			err = ext4_ext_get_access(handle, inode, path + depth);
			if (err)
				goto out;

			trace_ext4_ext_convert_to_initialized_fastpath(inode,
				map, ex, abut_ex);

			/* Shift the start of ex by 'map_len' blocks */
			ex->ee_block = cpu_to_le32(ee_block + map_len);
			ext4_ext_store_pblock(ex, ee_pblk + map_len);
			ex->ee_len = cpu_to_le16(ee_len - map_len);
			ext4_ext_mark_uninitialized(ex); /* Restore the flag */

			/* Extend abut_ex by 'map_len' blocks */
			abut_ex->ee_len = cpu_to_le16(prev_len + map_len);

			/* Result: number of initialized blocks past m_lblk */
			allocated = map_len;
		}
	} else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
		   (map_len < ee_len) &&	/*L1*/
		   ex < EXT_LAST_EXTENT(eh)) {	/*L2*/
		/* See if we can merge right */
		ext4_lblk_t next_lblk;
		ext4_fsblk_t next_pblk, ee_pblk;
		unsigned int next_len;

		abut_ex = ex + 1;
		next_lblk = le32_to_cpu(abut_ex->ee_block);
		next_len = ext4_ext_get_actual_len(abut_ex);
		next_pblk = ext4_ext_pblock(abut_ex);
		ee_pblk = ext4_ext_pblock(ex);

		/*
		 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
		 * upon those conditions:
		 * - C1: abut_ex is initialized,
		 * - C2: abut_ex is logically abutting ex,
		 * - C3: abut_ex is physically abutting ex,
		 * - C4: abut_ex can receive the additional blocks without
		 *   overflowing the (initialized) length limit.
		 */
		if ((!ext4_ext_is_uninitialized(abut_ex)) &&		/*C1*/
		    ((map->m_lblk + map_len) == next_lblk) &&		/*C2*/
		    ((ee_pblk + ee_len) == next_pblk) &&		/*C3*/
		    (next_len < (EXT_INIT_MAX_LEN - map_len))) {	/*C4*/
			err = ext4_ext_get_access(handle, inode, path + depth);
			if (err)
				goto out;

			trace_ext4_ext_convert_to_initialized_fastpath(inode,
				map, ex, abut_ex);

			/* Shift the start of abut_ex by 'map_len' blocks */
			abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
			ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
			ex->ee_len = cpu_to_le16(ee_len - map_len);
			ext4_ext_mark_uninitialized(ex); /* Restore the flag */

			/* Extend abut_ex by 'map_len' blocks */
			abut_ex->ee_len = cpu_to_le16(next_len + map_len);

			/* Result: number of initialized blocks past m_lblk */
			allocated = map_len;
		}
	}
	if (allocated) {
		/* Mark the block containing both extents as dirty */
		ext4_ext_dirty(handle, inode, path + depth);

		/* Update path to point to the right extent */
		path[depth].p_ext = abut_ex;
		goto out;
	} else
		allocated = ee_len - (map->m_lblk - ee_block);

	WARN_ON(map->m_lblk < ee_block);
	/*
	 * It is safe to convert the extent to initialized via explicit
	 * zeroout only if the extent is fully inside i_size or new_size.
	 */
	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;

	if (EXT4_EXT_MAY_ZEROOUT & split_flag)
		max_zeroout = sbi->s_extent_max_zeroout_kb >>
			(inode->i_sb->s_blocksize_bits - 10);

	/* If extent is less than s_max_zeroout_kb, zeroout directly */
	if (max_zeroout && (ee_len <= max_zeroout)) {
		err = ext4_ext_zeroout(inode, ex);
		if (err)
			goto out;
		zero_ex.ee_block = ex->ee_block;
		zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
		ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;
		ext4_ext_mark_initialized(ex);
		ext4_ext_try_to_merge(handle, inode, path, ex);
		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
		goto out;
	}

	/*
	 * four cases:
	 * 1. split the extent into three extents.
	 * 2. split the extent into two extents, zeroout the first half.
	 * 3. split the extent into two extents, zeroout the second half.
	 * 4. split the extent into two extents without zeroout.
	 */
	split_map.m_lblk = map->m_lblk;
	split_map.m_len = map->m_len;

	if (max_zeroout && (allocated > map->m_len)) {
		if (allocated <= max_zeroout) {
			/* case 3 */
			zero_ex.ee_block =
					 cpu_to_le32(map->m_lblk);
			zero_ex.ee_len = cpu_to_le16(allocated);
			ext4_ext_store_pblock(&zero_ex,
				ext4_ext_pblock(ex) + map->m_lblk - ee_block);
			err = ext4_ext_zeroout(inode, &zero_ex);
			if (err)
				goto out;
			split_map.m_lblk = map->m_lblk;
			split_map.m_len = allocated;
		} else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
			/* case 2 */
			if (map->m_lblk != ee_block) {
				zero_ex.ee_block = ex->ee_block;
				zero_ex.ee_len = cpu_to_le16(map->m_lblk -
							ee_block);
				ext4_ext_store_pblock(&zero_ex,
						      ext4_ext_pblock(ex));
				err = ext4_ext_zeroout(inode, &zero_ex);
				if (err)
					goto out;
			}

			split_map.m_lblk = ee_block;
			split_map.m_len = map->m_lblk - ee_block + map->m_len;
			allocated = map->m_len;
		}
	}

	allocated = ext4_split_extent(handle, inode, path,
				      &split_map, split_flag, flags);
	if (allocated < 0)
		err = allocated;

out:
	/* If we have gotten a failure, don't zero out status tree */
	if (!err)
		err = ext4_es_zeroout(inode, &zero_ex);
	return err ? err : allocated;
}
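
/*
 * Worked example for the zeroout threshold above (illustrative, not
 * from the original source): with 4k blocks (s_blocksize_bits == 12)
 * and s_extent_max_zeroout_kb == 32,
 *
 *	max_zeroout = 32 >> (12 - 10) = 8 blocks
 *
 * so an uninitialized extent of up to 8 blocks is zeroed in place
 * rather than split, trading some write bandwidth for less extent
 * tree fragmentation.
 */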
/*
 * This function is called by ext4_ext_map_blocks() from
 * ext4_get_blocks_dio_write() when a DIO write targets
 * an uninitialized extent.
 *
 * Writing to an uninitialized extent may result in splitting the uninitialized
 * extent into multiple initialized/uninitialized extents (up to three)
 * There are three possibilities:
 *   a> There is no split required: Entire extent should be uninitialized
 *   b> Splits in two extents: Write is happening at either end of the extent
 *   c> Splits in three extents: Someone is writing in the middle of the
 *      extent
 *
 * One or more index blocks may be needed if the extent tree grows after
 * the uninitialized extent is split. To prevent ENOSPC at IO completion,
 * we need to split the uninitialized extent before the DIO submits the
 * IO. The uninitialized extent will be split into three uninitialized
 * extents (at most). After the IO completes, the part being filled will
 * be converted to initialized by the end_io callback function
 * via ext4_convert_unwritten_extents().
 *
 * Returns the size of the uninitialized extent to be written on success.
 */
static int ext4_split_unwritten_extents(handle_t *handle,
					struct inode *inode,
					struct ext4_map_blocks *map,
					struct ext4_ext_path *path,
					int flags)
{
	ext4_lblk_t eof_block;
	ext4_lblk_t ee_block;
	struct ext4_extent *ex;
	unsigned int ee_len;
	int split_flag = 0, depth;

	ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
		(unsigned long long)map->m_lblk, map->m_len);

	eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
		inode->i_sb->s_blocksize_bits;
	if (eof_block < map->m_lblk + map->m_len)
		eof_block = map->m_lblk + map->m_len;
	/*
	 * It is safe to convert the extent to initialized via explicit
	 * zeroout only if the extent is fully inside i_size or new_size.
	 */
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
	split_flag |= EXT4_EXT_MARK_UNINIT2;
	if (flags & EXT4_GET_BLOCKS_CONVERT)
		split_flag |= EXT4_EXT_DATA_VALID2;
	flags |= EXT4_GET_BLOCKS_PRE_IO;
	return ext4_split_extent(handle, inode, path, map, split_flag, flags);
}
static int ext4_convert_unwritten_extents_endio(handle_t *handle,
						struct inode *inode,
						struct ext4_map_blocks *map,
						struct ext4_ext_path *path)
{
	struct ext4_extent *ex;
	ext4_lblk_t ee_block;
	unsigned int ee_len;
	int depth;
	int err = 0;

	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	ee_block = le32_to_cpu(ex->ee_block);
	ee_len = ext4_ext_get_actual_len(ex);

	ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical"
		"block %llu, max_blocks %u\n", inode->i_ino,
		  (unsigned long long)ee_block, ee_len);

	/* If the extent is larger than requested, it is a clear sign that we
	 * still have some extent state machine issues left. So extent_split
	 * is still required.
	 * TODO: Once all related issues are fixed, this situation should be
	 * removed.
	 */
	if (ee_block != map->m_lblk || ee_len > map->m_len) {
#ifdef EXT4_DEBUG
		ext4_warning("Inode (%ld) finished: extent logical block %llu,"
			     " len %u; IO logical block %llu, len %u\n",
			     inode->i_ino, (unsigned long long)ee_block, ee_len,
			     (unsigned long long)map->m_lblk, map->m_len);
#endif
		err = ext4_split_unwritten_extents(handle, inode, map, path,
						   EXT4_GET_BLOCKS_CONVERT);
		if (err < 0)
			goto out;
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode, map->m_lblk, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}
		depth = ext_depth(inode);
		ex = path[depth].p_ext;
	}

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto out;
	/* first mark the extent as initialized */
	ext4_ext_mark_initialized(ex);

	/* note: ext4_ext_correct_indexes() isn't needed here because
	 * borders are not changed
	 */
	ext4_ext_try_to_merge(handle, inode, path, ex);

	/* Mark modified extent as dirty */
	err = ext4_ext_dirty(handle, inode, path + path->p_depth);
out:
	ext4_ext_show_leaf(inode, path);
	return err;
}
static void unmap_underlying_metadata_blocks(struct block_device *bdev,
			sector_t block, int count)
{
	int i;
	for (i = 0; i < count; i++)
		unmap_underlying_metadata(bdev, block + i);
}
/*
 * Handle EOFBLOCKS_FL flag, clearing it if necessary
 */
static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
			      ext4_lblk_t lblk,
			      struct ext4_ext_path *path,
			      unsigned int len)
{
	int i, depth;
	struct ext4_extent_header *eh;
	struct ext4_extent *last_ex;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
		return 0;

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

	/*
	 * We're going to remove EOFBLOCKS_FL entirely in future so we
	 * do not care for this case anymore. Simply remove the flag
	 * if there are no extents.
	 */
	if (unlikely(!eh->eh_entries))
		goto out;

	last_ex = EXT_LAST_EXTENT(eh);
	/*
	 * We should clear the EOFBLOCKS_FL flag if we are writing the
	 * last block in the last extent in the file.  We test this by
	 * first checking to see if the caller to
	 * ext4_ext_get_blocks() was interested in the last block (or
	 * a block beyond the last block) in the current extent.  If
	 * this turns out to be false, we can bail out from this
	 * function immediately.
	 */
	if (lblk + len < le32_to_cpu(last_ex->ee_block) +
	    ext4_ext_get_actual_len(last_ex))
		return 0;
	/*
	 * If the caller does appear to be planning to write at or
	 * beyond the end of the current extent, we then test to see
	 * if the current extent is the last extent in the file, by
	 * checking to make sure it was reached via the rightmost node
	 * at each level of the tree.
	 */
	for (i = depth-1; i >= 0; i--)
		if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
			return 0;
out:
	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	return ext4_mark_inode_dirty(handle, inode);
}
/*
 * ext4_find_delalloc_range: find delayed allocated blocks in the given range.
 *
 * Return 1 if there is a delalloc block in the range, otherwise 0.
 */
int ext4_find_delalloc_range(struct inode *inode,
			     ext4_lblk_t lblk_start,
			     ext4_lblk_t lblk_end)
{
	struct extent_status es;

	ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
	if (es.es_len == 0)
		return 0; /* there is no delay extent in this tree */
	else if (es.es_lblk <= lblk_start &&
		 lblk_start < es.es_lblk + es.es_len)
		return 1;
	else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
		return 1;
	else
		return 0;
}
int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t lblk_start, lblk_end;
	lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
	lblk_end = lblk_start + sbi->s_cluster_ratio - 1;

	return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
}
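
/*
 * Illustrative note (not in the original source): the masking above
 * rounds lblk down to its cluster boundary; with s_cluster_ratio == 16
 * and lblk == 37:
 *
 *	lblk_start = 37 & ~15 = 32
 *	lblk_end   = 32 + 16 - 1 = 47
 *
 * i.e. the whole cluster [32..47] is probed for delayed extents.
 */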
/*
 * Determines how many complete clusters (out of those specified by the 'map')
 * are under delalloc and were reserved quota for.
 * This function is called when we are writing out the blocks that were
 * originally written with their allocation delayed, but then the space was
 * allocated using fallocate() before the delayed allocation could be resolved.
 * The cases to look for are:
 * ('=' indicates delayed allocated blocks
 *  '-' indicates non-delayed allocated blocks)
 * (a) partial clusters towards beginning and/or end outside of allocated range
 *     are not delalloc'ed.
 *	Ex:
 *	|----c---=|====c====|====c====|===-c----|
 *	         |++++++ allocated ++++++|
 *	==> 4 complete clusters in above example
 *
 * (b) partial cluster (outside of allocated range) towards either end is
 *     marked for delayed allocation. In this case, we will exclude that
 *     cluster.
 *	Ex:
 *	|----====c========|========c========|
 *	     |++++++ allocated ++++++|
 *	==> 1 complete cluster in above example
 *
 *	Ex:
 *	|================c================|
 *	            |++++++ allocated ++++++|
 *	==> 0 complete clusters in above example
 *
 * The ext4_da_update_reserve_space will be called only if we
 * determine here that there were some "entire" clusters that span
 * this 'allocated' range.
 * In the non-bigalloc case, this function will just end up returning num_blks
 * without ever calling ext4_find_delalloc_range.
 */
static unsigned int
get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
			   unsigned int num_blks)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
	ext4_lblk_t lblk_from, lblk_to, c_offset;
	unsigned int allocated_clusters = 0;

	alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
	alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);

	/* max possible clusters for this allocation */
	allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;

	trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);

	/* Check towards left side */
	c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
	if (c_offset) {
		lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
		lblk_to = lblk_from + c_offset - 1;

		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
			allocated_clusters--;
	}

	/* Now check towards right. */
	c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
	if (allocated_clusters && c_offset) {
		lblk_from = lblk_start + num_blks;
		lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;

		if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
			allocated_clusters--;
	}

	return allocated_clusters;
}
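
/*
 * Worked example (illustrative, not from the original source), with
 * s_cluster_ratio == 4, lblk_start == 6, num_blks == 5 (blocks 6..10):
 *
 *	alloc_cluster_start = 1, alloc_cluster_end = 2
 *	=> allocated_clusters = 2
 *
 * left partial:  c_offset = 6 & 3 = 2, so blocks [4..5] are probed;
 * right partial: c_offset = 11 & 3 = 3, so block [11] is probed;
 * each probe that finds delalloc outside the range drops one cluster
 * from the count, exactly as case (b) in the comment above shows.
 */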
static int
ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
			struct ext4_map_blocks *map,
			struct ext4_ext_path *path, int flags,
			unsigned int allocated, ext4_fsblk_t newblock)
{
	int ret = 0;
	int err = 0;
	ext4_io_end_t *io = ext4_inode_aio(inode);

	ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
		  "block %llu, max_blocks %u, flags %x, allocated %u\n",
		  inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
		  flags, allocated);
	ext4_ext_show_leaf(inode, path);

	/*
	 * When writing into uninitialized space, we should not fail to
	 * allocate metadata blocks for the new extent block if needed.
	 */
	flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;

	trace_ext4_ext_handle_uninitialized_extents(inode, map, flags,
						    allocated, newblock);

	/* get_block() before submitting the IO, split the extent */
	if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
		ret = ext4_split_unwritten_extents(handle, inode, map,
						   path, flags);
		if (ret <= 0)
			goto out;
		/*
		 * Flag the inode (non-aio case) or end_io struct (aio case)
		 * that this IO needs conversion to written when the IO is
		 * completed
		 */
		if (io)
			ext4_set_io_unwritten_flag(inode, io);
		else
			ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		if (ext4_should_dioread_nolock(inode))
			map->m_flags |= EXT4_MAP_UNINIT;
		goto out;
	}
	/* IO end_io complete, convert the filled extent to written */
	if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
		ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
							path);
		if (ret >= 0) {
			ext4_update_inode_fsync_trans(handle, inode, 1);
			err = check_eofblocks_fl(handle, inode, map->m_lblk,
						 path, map->m_len);
		} else
			err = ret;
		map->m_flags |= EXT4_MAP_MAPPED;
		if (allocated > map->m_len)
			allocated = map->m_len;
		map->m_len = allocated;
		goto out2;
	}
	/* buffered IO case */
	/*
	 * repeat fallocate creation request
	 * we already have an unwritten extent
	 */
	if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		goto map_out;
	}

	/* buffered READ or buffered write_begin() lookup */
	if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
		/*
		 * We have blocks reserved already.  We
		 * return allocated blocks so that delalloc
		 * won't do block reservation for us.  But
		 * the buffer head will be unmapped so that
		 * a read from the block returns 0s.
		 */
		map->m_flags |= EXT4_MAP_UNWRITTEN;
		goto out1;
	}

	/* buffered write, writepage time, convert*/
	ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags);
	if (ret >= 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
out:
	if (ret <= 0) {
		err = ret;
		goto out2;
	} else
		allocated = ret;
	map->m_flags |= EXT4_MAP_NEW;
	/*
	 * if we allocated more blocks than requested
	 * we need to make sure we unmap the extra blocks
	 * allocated. The actual needed block will get
	 * unmapped later when we find the buffer_head marked
	 * new.
	 */
	if (allocated > map->m_len) {
		unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
					newblock + map->m_len,
					allocated - map->m_len);
		allocated = map->m_len;
	}
	map->m_len = allocated;

	/*
	 * If we have done fallocate with the offset that is already
	 * delayed allocated, we would have block reservation
	 * and quota reservation done in the delayed write path.
	 * But fallocate would have already updated quota and block
	 * count for this offset. So cancel these reservations
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		unsigned int reserved_clusters;
		reserved_clusters = get_reserved_cluster_alloc(inode,
				map->m_lblk, map->m_len);
		if (reserved_clusters)
			ext4_da_update_reserve_space(inode,
						     reserved_clusters,
						     0);
	}

map_out:
	map->m_flags |= EXT4_MAP_MAPPED;
	if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
		err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
					 map->m_len);
		if (err < 0)
			goto out2;
	}
out1:
	if (allocated > map->m_len)
		allocated = map->m_len;
	ext4_ext_show_leaf(inode, path);
	map->m_pblk = newblock;
	map->m_len = allocated;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	return err ? err : allocated;
}
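
/*
 * Illustrative summary of the dispatch above (not in the original
 * source):
 *
 *	EXT4_GET_BLOCKS_PRE_IO     -> split only; conversion deferred
 *	                              to end_io (DIO/AIO path)
 *	EXT4_GET_BLOCKS_CONVERT    -> end_io completion, mark written
 *	EXT4_GET_BLOCKS_UNINIT_EXT -> repeated fallocate, keep unwritten
 *	no CREATE                  -> lookup only, stays unmapped
 *	otherwise                  -> writeback, convert to initialized
 */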
/*
 * get_implied_cluster_alloc - check to see if the requested
 * allocation (in the map structure) overlaps with a cluster already
 * allocated in an extent.
 *	@sb	The filesystem superblock structure
 *	@map	The requested lblk->pblk mapping
 *	@ex	The extent structure which might contain an implied
 *		cluster allocation
 *
 * This function is called by ext4_ext_map_blocks() after we failed to
 * find blocks that were already in the inode's extent tree.  Hence,
 * we know that the beginning of the requested region cannot overlap
 * the extent from the inode's extent tree.  There are three cases we
 * want to catch.  The first is this case:
 *
 *		 |--- cluster # N--|
 *    |--- extent ---|	|---- requested region ---|
 *			|==========|
 *
 * The second case that we need to test for is this one:
 *
 *   |--------- cluster # N ----------------|
 *	   |--- requested region --|   |------- extent ----|
 *	   |=======================|
 *
 * The third case is when the requested region lies between two extents
 * within the same cluster:
 *          |------------- cluster # N-------------|
 * |----- ex -----|                  |---- ex_right ----|
 *                  |------ requested region ------|
 *                  |================|
 *
 * In each of the above cases, we need to set map->m_pblk and
 * map->m_len so they correspond to the extent labelled as
 * "|====|" from cluster #N, since it is already in use for data in
 * cluster EXT4_B2C(sbi, map->m_lblk).	We will then return 1 to
 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
 * as a new "allocated" block region.  Otherwise, we will return 0 and
 * ext4_ext_map_blocks() will then allocate one or more new clusters
 * by calling ext4_mb_new_blocks().
 */
static int get_implied_cluster_alloc(struct super_block *sb,
				     struct ext4_map_blocks *map,
				     struct ext4_extent *ex,
				     struct ext4_ext_path *path)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
	ext4_lblk_t ex_cluster_start, ex_cluster_end;
	ext4_lblk_t rr_cluster_start;
	ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
	ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
	unsigned short ee_len = ext4_ext_get_actual_len(ex);

	/* The extent passed in that we are trying to match */
	ex_cluster_start = EXT4_B2C(sbi, ee_block);
	ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);

	/* The requested region passed into ext4_map_blocks() */
	rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);

	if ((rr_cluster_start == ex_cluster_end) ||
	    (rr_cluster_start == ex_cluster_start)) {
		if (rr_cluster_start == ex_cluster_end)
			ee_start += ee_len - 1;
		map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
			c_offset;
		map->m_len = min(map->m_len,
				 (unsigned) sbi->s_cluster_ratio - c_offset);
		/*
		 * Check for and handle this case:
		 *
		 *   |--------- cluster # N-------------|
		 *		       |------- extent ----|
		 *	   |--- requested region ---|
		 *	   |===========|
		 */

		if (map->m_lblk < ee_block)
			map->m_len = min(map->m_len, ee_block - map->m_lblk);

		/*
		 * Check for the case where there is already another allocated
		 * block to the right of 'ex' but before the end of the cluster.
		 *
		 *          |------------- cluster # N-------------|
		 * |----- ex -----|                  |---- ex_right ----|
		 *                  |------ requested region ------|
		 *                  |================|
		 */
		if (map->m_lblk > ee_block) {
			ext4_lblk_t next = ext4_ext_next_allocated_block(path);
			map->m_len = min(map->m_len, next - map->m_lblk);
		}

		trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
		return 1;
	}

	trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
	return 0;
}
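
/*
 * Worked example (illustrative, not from the original source), with
 * s_cluster_ratio == 4: an extent covers logical [8..9] at pblk
 * [204..205], and the request starts at m_lblk == 10:
 *
 *	c_offset = 10 & 3 = 2
 *	ee_start + ee_len - 1 = 205, and (205 & ~3) + 2 = 206
 *	map->m_pblk = 206, map->m_len <= 4 - 2 = 2
 *
 * so blocks [10..11] reuse the tail of the already-allocated cluster
 * (pblk 204..207) and the function returns 1.
 */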
4015 * Block allocation/map/preallocation routine for extents based files
4018 * Need to be called with
4019 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
4020 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
4022 * return > 0, number of of blocks already mapped/allocated
4023 * if create == 0 and these are pre-allocated blocks
4024 * buffer head is unmapped
4025 * otherwise blocks are mapped
4027 * return = 0, if plain look up failed (blocks have not been allocated)
4028 * buffer head is unmapped
4030 * return < 0, error case.
4032 int ext4_ext_map_blocks(handle_t
*handle
, struct inode
*inode
,
4033 struct ext4_map_blocks
*map
, int flags
)
4035 struct ext4_ext_path
*path
= NULL
;
4036 struct ext4_extent newex
, *ex
, *ex2
;
4037 struct ext4_sb_info
*sbi
= EXT4_SB(inode
->i_sb
);
4038 ext4_fsblk_t newblock
= 0;
4039 int free_on_err
= 0, err
= 0, depth
;
4040 unsigned int allocated
= 0, offset
= 0;
4041 unsigned int allocated_clusters
= 0;
4042 struct ext4_allocation_request ar
;
4043 ext4_io_end_t
*io
= ext4_inode_aio(inode
);
4044 ext4_lblk_t cluster_offset
;
4045 int set_unwritten
= 0;
4047 ext_debug("blocks %u/%u requested for inode %lu\n",
4048 map
->m_lblk
, map
->m_len
, inode
->i_ino
);
4049 trace_ext4_ext_map_blocks_enter(inode
, map
->m_lblk
, map
->m_len
, flags
);
4051 /* find extent for this block */
4052 path
= ext4_ext_find_extent(inode
, map
->m_lblk
, NULL
);
4054 err
= PTR_ERR(path
);
4059 depth
= ext_depth(inode
);
4062 * consistent leaf must not be empty;
4063 * this situation is possible, though, _during_ tree modification;
4064 * this is why assert can't be put in ext4_ext_find_extent()
4066 if (unlikely(path
[depth
].p_ext
== NULL
&& depth
!= 0)) {
4067 EXT4_ERROR_INODE(inode
, "bad extent address "
4068 "lblock: %lu, depth: %d pblock %lld",
4069 (unsigned long) map
->m_lblk
, depth
,
4070 path
[depth
].p_block
);
4075 ex
= path
[depth
].p_ext
;
4077 ext4_lblk_t ee_block
= le32_to_cpu(ex
->ee_block
);
4078 ext4_fsblk_t ee_start
= ext4_ext_pblock(ex
);
4079 unsigned short ee_len
;
4082 * Uninitialized extents are treated as holes, except that
4083 * we split out initialized portions during a write.
4085 ee_len
= ext4_ext_get_actual_len(ex
);
4087 trace_ext4_ext_show_extent(inode
, ee_block
, ee_start
, ee_len
);
4089 /* if found extent covers block, simply return it */
4090 if (in_range(map
->m_lblk
, ee_block
, ee_len
)) {
4091 newblock
= map
->m_lblk
- ee_block
+ ee_start
;
4092 /* number of remaining blocks in the extent */
4093 allocated
= ee_len
- (map
->m_lblk
- ee_block
);
4094 ext_debug("%u fit into %u:%d -> %llu\n", map
->m_lblk
,
4095 ee_block
, ee_len
, newblock
);
4097 if (!ext4_ext_is_uninitialized(ex
))
4100 allocated
= ext4_ext_handle_uninitialized_extents(
4101 handle
, inode
, map
, path
, flags
,
4102 allocated
, newblock
);
4107 if ((sbi
->s_cluster_ratio
> 1) &&
4108 ext4_find_delalloc_cluster(inode
, map
->m_lblk
))
4109 map
->m_flags
|= EXT4_MAP_FROM_CLUSTER
;
4112 * requested block isn't allocated yet;
4113 * we couldn't try to create block if create flag is zero
4115 if ((flags
& EXT4_GET_BLOCKS_CREATE
) == 0) {
4117 * put just found gap into cache to speed up
4118 * subsequent requests
4120 if ((flags
& EXT4_GET_BLOCKS_NO_PUT_HOLE
) == 0)
4121 ext4_ext_put_gap_in_cache(inode
, path
, map
->m_lblk
);
4126 * Okay, we need to do block allocation.
4128 map
->m_flags
&= ~EXT4_MAP_FROM_CLUSTER
;
4129 newex
.ee_block
= cpu_to_le32(map
->m_lblk
);
4130 cluster_offset
= map
->m_lblk
& (sbi
->s_cluster_ratio
-1);
4133 * If we are doing bigalloc, check to see if the extent returned
4134 * by ext4_ext_find_extent() implies a cluster we can use.
4136 if (cluster_offset
&& ex
&&
4137 get_implied_cluster_alloc(inode
->i_sb
, map
, ex
, path
)) {
4138 ar
.len
= allocated
= map
->m_len
;
4139 newblock
= map
->m_pblk
;
4140 map
->m_flags
|= EXT4_MAP_FROM_CLUSTER
;
4141 goto got_allocated_blocks
;
4144 /* find neighbour allocated blocks */
4145 ar
.lleft
= map
->m_lblk
;
4146 err
= ext4_ext_search_left(inode
, path
, &ar
.lleft
, &ar
.pleft
);
4149 ar
.lright
= map
->m_lblk
;
4151 err
= ext4_ext_search_right(inode
, path
, &ar
.lright
, &ar
.pright
, &ex2
);
4155 /* Check if the extent after searching to the right implies a
4156 * cluster we can use. */
4157 if ((sbi
->s_cluster_ratio
> 1) && ex2
&&
4158 get_implied_cluster_alloc(inode
->i_sb
, map
, ex2
, path
)) {
4159 ar
.len
= allocated
= map
->m_len
;
4160 newblock
= map
->m_pblk
;
4161 map
->m_flags
|= EXT4_MAP_FROM_CLUSTER
;
4162 goto got_allocated_blocks
;
4166 * See if request is beyond maximum number of blocks we can have in
4167 * a single extent. For an initialized extent this limit is
4168 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
4169 * EXT_UNINIT_MAX_LEN.
4171 if (map
->m_len
> EXT_INIT_MAX_LEN
&&
4172 !(flags
& EXT4_GET_BLOCKS_UNINIT_EXT
))
4173 map
->m_len
= EXT_INIT_MAX_LEN
;
4174 else if (map
->m_len
> EXT_UNINIT_MAX_LEN
&&
4175 (flags
& EXT4_GET_BLOCKS_UNINIT_EXT
))
4176 map
->m_len
= EXT_UNINIT_MAX_LEN
;
4178 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
4179 newex
.ee_len
= cpu_to_le16(map
->m_len
);
4180 err
= ext4_ext_check_overlap(sbi
, inode
, &newex
, path
);
4182 allocated
= ext4_ext_get_actual_len(&newex
);
4184 allocated
= map
->m_len
;
4186 /* allocate new block */
4188 ar
.goal
= ext4_ext_find_goal(inode
, path
, map
->m_lblk
);
4189 ar
.logical
= map
->m_lblk
;
4191 * We calculate the offset from the beginning of the cluster
4192 * for the logical block number, since when we allocate a
4193 * physical cluster, the physical block should start at the
4194 * same offset from the beginning of the cluster. This is
4195 * needed so that future calls to get_implied_cluster_alloc()
4198 offset
= map
->m_lblk
& (sbi
->s_cluster_ratio
- 1);
4199 ar
.len
= EXT4_NUM_B2C(sbi
, offset
+allocated
);
4201 ar
.logical
-= offset
;
4202 if (S_ISREG(inode
->i_mode
))
4203 ar
.flags
= EXT4_MB_HINT_DATA
;
4205 /* disable in-core preallocation for non-regular files */
4207 if (flags
& EXT4_GET_BLOCKS_NO_NORMALIZE
)
4208 ar
.flags
|= EXT4_MB_HINT_NOPREALLOC
;
4209 newblock
= ext4_mb_new_blocks(handle
, &ar
, &err
);
4212 ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4213 ar
.goal
, newblock
, allocated
);
4215 allocated_clusters
= ar
.len
;
4216 ar
.len
= EXT4_C2B(sbi
, ar
.len
) - offset
;
4217 if (ar
.len
> allocated
)
4220 got_allocated_blocks
:
4221 /* try to insert new extent into found leaf and return */
4222 ext4_ext_store_pblock(&newex
, newblock
+ offset
);
4223 newex
.ee_len
= cpu_to_le16(ar
.len
);
4224 /* Mark uninitialized */
4225 if (flags
& EXT4_GET_BLOCKS_UNINIT_EXT
){
4226 ext4_ext_mark_uninitialized(&newex
);
4227 map
->m_flags
|= EXT4_MAP_UNWRITTEN
;
4229 * io_end structure was created for every IO write to an
4230 * uninitialized extent. To avoid unnecessary conversion,
4231 * here we flag the IO that really needs the conversion.
4232 * For non asycn direct IO case, flag the inode state
4233 * that we need to perform conversion when IO is done.
4235 if ((flags
& EXT4_GET_BLOCKS_PRE_IO
))
4237 if (ext4_should_dioread_nolock(inode
))
4238 map
->m_flags
|= EXT4_MAP_UNINIT
;
4242 if ((flags
& EXT4_GET_BLOCKS_KEEP_SIZE
) == 0)
4243 err
= check_eofblocks_fl(handle
, inode
, map
->m_lblk
,
4246 err
= ext4_ext_insert_extent(handle
, inode
, path
,
4249 if (!err
&& set_unwritten
) {
4251 ext4_set_io_unwritten_flag(inode
, io
);
4253 ext4_set_inode_state(inode
,
4254 EXT4_STATE_DIO_UNWRITTEN
);
4257 if (err
&& free_on_err
) {
4258 int fb_flags
= flags
& EXT4_GET_BLOCKS_DELALLOC_RESERVE
?
4259 EXT4_FREE_BLOCKS_NO_QUOT_UPDATE
: 0;
4260 /* free data blocks we just allocated */
4261 /* not a good idea to call discard here directly,
4262 * but otherwise we'd need to call it every free() */
4263 ext4_discard_preallocations(inode
);
4264 ext4_free_blocks(handle
, inode
, NULL
, ext4_ext_pblock(&newex
),
4265 ext4_ext_get_actual_len(&newex
), fb_flags
);
4269 /* previous routine could use block we allocated */
4270 newblock
= ext4_ext_pblock(&newex
);
4271 allocated
= ext4_ext_get_actual_len(&newex
);
4272 if (allocated
> map
->m_len
)
4273 allocated
= map
->m_len
;
4274 map
->m_flags
|= EXT4_MAP_NEW
;
	/*
	 * Update reserved blocks/metadata blocks after successful
	 * block allocation which had been deferred till now.
	 */
	if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
		unsigned int reserved_clusters;
		/*
		 * Check how many clusters we had reserved for this
		 * allocated range.
		 */
		reserved_clusters = get_reserved_cluster_alloc(inode,
						map->m_lblk, allocated);
		if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
			if (reserved_clusters) {
				/*
				 * We have clusters reserved for this range.
				 * But since we are not doing actual allocation
				 * and are simply using blocks from a previously
				 * allocated cluster, we should release the
				 * reservation and not claim quota.
				 */
				ext4_da_update_reserve_space(inode,
						reserved_clusters, 0);
			}
		} else {
			BUG_ON(allocated_clusters < reserved_clusters);
			if (reserved_clusters < allocated_clusters) {
				struct ext4_inode_info *ei = EXT4_I(inode);
				int reservation = allocated_clusters -
						  reserved_clusters;
				/*
				 * It seems we claimed a few clusters outside
				 * of the range of this allocation. We should
				 * give them back to the reservation pool.
				 * This can happen in the following case:
				 *
				 * * Suppose s_cluster_ratio is 4 (i.e., each
				 *   cluster has 4 blocks). Thus, the clusters
				 *   are [0-3],[4-7],[8-11]...
				 * * First comes a delayed allocation write for
				 *   logical blocks 10 & 11. Since there were no
				 *   previous delayed allocated blocks in the
				 *   range [8-11], we would reserve 1 cluster
				 *   for this write.
				 * * Next comes a write for logical blocks 3 to
				 *   8. In this case, we will reserve 2 clusters
				 *   (for [0-3] and [4-7]; and not for [8-11],
				 *   as that range already has delayed allocated
				 *   blocks). Thus total reserved clusters now
				 *   becomes 3.
				 * * Now, during the delayed allocation writeout
				 *   time, we will first write blocks [3-8] and
				 *   allocate 3 clusters for writing these
				 *   blocks. Also, we would claim all these
				 *   three clusters above.
				 * * Now when we come here to write out the
				 *   blocks [10-11], we would expect to claim
				 *   the reservation of 1 cluster we had made
				 *   (and we would claim it since there are no
				 *   more delayed allocated blocks in the range
				 *   [8-11]). But our reserved cluster count had
				 *   already gone to 0.
				 *
				 *   Thus, at step 4 above when we determine
				 *   that there are still some unwritten delayed
				 *   allocated blocks outside of our current
				 *   block range, we should increment the
				 *   reserved clusters count so that when the
				 *   remaining blocks finally get written, we
				 *   can claim them.
				 */
				dquot_reserve_block(inode,
						EXT4_C2B(sbi, reservation));
				spin_lock(&ei->i_block_reservation_lock);
				ei->i_reserved_data_blocks += reservation;
				spin_unlock(&ei->i_block_reservation_lock);
			}
			/*
			 * We will claim quota for all newly allocated blocks.
			 * We're updating the reserved space *after* the
			 * correction above so we do not accidentally free
			 * all the metadata reservation because we might
			 * actually need it later on.
			 */
			ext4_da_update_reserve_space(inode, allocated_clusters,
						     1);
		}
	}

	/*
	 * Cache the extent and update transaction to commit on fdatasync only
	 * when it is _not_ an uninitialized extent.
	 */
	if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0)
		ext4_update_inode_fsync_trans(handle, inode, 1);
	else
		ext4_update_inode_fsync_trans(handle, inode, 0);
out:
	if (allocated > map->m_len)
		allocated = map->m_len;
	ext4_ext_show_leaf(inode, path);
	map->m_flags |= EXT4_MAP_MAPPED;
	map->m_pblk = newblock;
	map->m_len = allocated;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	trace_ext4_ext_map_blocks_exit(inode, flags, map,
				       err ? err : allocated);

	return err ? err : allocated;
}
void ext4_ext_truncate(handle_t *handle, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ext4_lblk_t last_block;
	int err = 0;

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */

	/* we have to know where to truncate from in crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
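	/*
	 * Illustrative arithmetic: with a 4096-byte block size and
	 * i_size = 5000, last_block = (5000 + 4095) >> 12 = 2, i.e. the
	 * first block lying entirely beyond the new size; everything from
	 * there to EXT_MAX_BLOCKS is removed below.
	 */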
	err = ext4_es_remove_extent(inode, last_block,
				    EXT_MAX_BLOCKS - last_block);
	err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
}
static void ext4_falloc_update_inode(struct inode *inode,
				     int mode, loff_t new_size,
				     int update_ctime)
{
	struct timespec now;

	if (update_ctime) {
		now = current_fs_time(inode->i_sb);
		if (!timespec_equal(&inode->i_ctime, &now))
			inode->i_ctime = now;
	}
	/*
	 * Update only when preallocation was requested beyond
	 * the file size.
	 */
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		if (new_size > i_size_read(inode))
			i_size_write(inode, new_size);
		if (new_size > EXT4_I(inode)->i_disksize)
			ext4_update_i_disksize(inode, new_size);
	} else {
		/*
		 * Mark that we allocate beyond EOF so the subsequent truncate
		 * can proceed even if the new size is the same as i_size.
		 */
		if (new_size > i_size_read(inode))
			ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
	}
}
/*
 * Preallocate space for a file. This implements ext4's fallocate file
 * operation, which gets called from the sys_fallocate system call.
 * For block-mapped files, posix_fallocate should fall back to the method
 * of writing zeroes to the required new blocks (the same behavior which
 * is expected for file systems which do not support the fallocate()
 * system call).
 */
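/*
 * Example call from userspace (illustrative): preallocating 16 MiB for a
 * file without changing its visible size,
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20);
 *
 * reaches this function via sys_fallocate() and allocates uninitialized
 * extents for the byte range.
 */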
long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
{
	struct inode *inode = file_inode(file);
	handle_t *handle;
	loff_t new_size = 0;
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	int retries = 0;
	int flags;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	/* Return error if mode is not supported */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return ext4_punch_hole(inode, offset, len);
	ret = ext4_convert_inline_data(inode);
	if (ret)
		return ret;

	/*
	 * currently supporting (pre)allocate mode for extent-based
	 * files _only_
	 */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EOPNOTSUPP;

	trace_ext4_fallocate_enter(inode, offset, len, mode);
	map.m_lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because
	 * if blocksize = 4096, offset = 3072 and len = 2048
	 */
	max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
		- map.m_lblk;
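	/*
	 * Worked example completing the comment above: with blocksize =
	 * 4096, offset = 3072 and len = 2048 the range covers bytes
	 * 3072..5119, i.e. logical blocks 0 and 1, so max_blocks =
	 * (EXT4_BLOCK_ALIGN(5120, 12) >> 12) - 0 = 2, whereas
	 * len >> blkbits alone would have yielded 0.
	 */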
	/*
	 * credits to insert 1 extent into extent tree
	 */
	credits = ext4_chunk_trans_blocks(inode, max_blocks);
	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, (len + offset));
	if (ret) {
		mutex_unlock(&inode->i_mutex);
		trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
		return ret;
	}
	flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
	if (mode & FALLOC_FL_KEEP_SIZE)
		flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
	/*
	 * Don't normalize the request if it can fit in one extent so
	 * that it doesn't get unnecessarily split into multiple
	 * extents.
	 */
	if (len <= EXT_UNINIT_MAX_LEN << blkbits)
		flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
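	/*
	 * (Assuming the usual EXT_UNINIT_MAX_LEN of 32767 blocks: with
	 * 4 KiB blocks, anything up to roughly 128 MiB still fits in a
	 * single uninitialized extent, so normalization is skipped.)
	 */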
retry:
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk = map.m_lblk + ret;
		map.m_len = max_blocks = max_blocks - ret;
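		/*
		 * m_lblk/m_len were just advanced past the 'ret' blocks
		 * mapped on the previous pass, so each transaction below
		 * covers only the still-unmapped tail of the range.
		 */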
		handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
					    credits);
		if (IS_ERR(handle)) {
			ret = PTR_ERR(handle);
			break;
		}
		ret = ext4_map_blocks(handle, inode, &map, flags);
		if (ret <= 0) {
#ifdef EXT4FS_DEBUG
			ext4_warning(inode->i_sb,
				     "inode #%lu: block %u: len %u: "
				     "ext4_ext_map_blocks returned %d",
				     inode->i_ino, map.m_lblk,
				     map.m_len, ret);
#endif
			ext4_mark_inode_dirty(handle, inode);
			ret2 = ext4_journal_stop(handle);
			break;
		}
		if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
						blkbits) >> blkbits))
			new_size = offset + len;
		else
			new_size = ((loff_t) map.m_lblk + ret) << blkbits;

		ext4_falloc_update_inode(inode, mode, new_size,
					 (map.m_flags & EXT4_MAP_NEW));
		ext4_mark_inode_dirty(handle, inode);
		if ((file->f_flags & O_SYNC) && ret >= max_blocks)
			ext4_handle_sync(handle);
		ret2 = ext4_journal_stop(handle);
		if (ret2)
			break;
	}
	if (ret == -ENOSPC &&
			ext4_should_retry_alloc(inode->i_sb, &retries)) {
		ret = 0;
		goto retry;
	}
	mutex_unlock(&inode->i_mutex);
	trace_ext4_fallocate_exit(inode, offset, max_blocks,
				  ret > 0 ? ret2 : ret);
	return ret > 0 ? ret2 : ret;
}
/*
 * This function converts a range of blocks to written extents.
 * The caller will pass the start offset and the size; all unwritten
 * extents within this range will be converted to written extents.
 *
 * This function is called from the direct IO end io callback
 * function, to convert the fallocated extents after IO is completed.
 * Returns 0 on success.
 */
int ext4_convert_unwritten_extents(handle_t *handle, struct inode *inode,
				   loff_t offset, ssize_t len)
{
	unsigned int max_blocks;
	int ret = 0;
	int ret2 = 0;
	struct ext4_map_blocks map;
	unsigned int credits, blkbits = inode->i_blkbits;

	map.m_lblk = offset >> blkbits;
	/*
	 * We can't just convert len to max_blocks because a partial
	 * block could be lost; e.g. blocksize = 4096, offset = 3072
	 * and len = 2048 (see the worked example in ext4_fallocate()).
	 */
	max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
		      map.m_lblk);
	/*
	 * This is somewhat ugly but the idea is clear: When transaction is
	 * reserved, everything goes into it. Otherwise we rather start several
	 * smaller transactions for conversion of each extent separately.
	 */
	if (handle) {
		handle = ext4_journal_start_reserved(handle,
						     EXT4_HT_EXT_CONVERT);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		credits = 0;
	} else {
		/*
		 * credits to insert 1 extent into extent tree
		 */
		credits = ext4_chunk_trans_blocks(inode, max_blocks);
	}
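	/*
	 * (Calling convention implied above: the direct IO completion path
	 * passes the transaction handle it reserved at submit time; other
	 * callers pass NULL and each extent is then converted in its own
	 * small transaction.)
	 */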
	while (ret >= 0 && ret < max_blocks) {
		map.m_lblk += ret;
		map.m_len = (max_blocks -= ret);
		if (credits) {
			handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
						    credits);
			if (IS_ERR(handle)) {
				ret = PTR_ERR(handle);
				break;
			}
		}
		ret = ext4_map_blocks(handle, inode, &map,
				      EXT4_GET_BLOCKS_IO_CONVERT_EXT);
		if (ret <= 0)
			ext4_warning(inode->i_sb,
				     "inode #%lu: block %u: len %u: "
				     "ext4_ext_map_blocks returned %d",
				     inode->i_ino, map.m_lblk,
				     map.m_len, ret);
		ext4_mark_inode_dirty(handle, inode);
		if (credits)
			ret2 = ext4_journal_stop(handle);
		if (ret <= 0 || ret2)
			break;
	}
	if (!credits)
		ret2 = ext4_journal_stop(handle);
	return ret > 0 ? ret2 : ret;
}
/*
 * If newes is not an existing extent (newes->es_pblk equals zero), find
 * the delayed extent at the start of newes, update newes accordingly,
 * and return the start of the next delayed extent.
 *
 * If newes is an existing extent (newes->es_pblk is not zero), return
 * the start of the next delayed extent, or EXT_MAX_BLOCKS if no delayed
 * extent is found. Leave newes unmodified.
 */
static int ext4_find_delayed_extent(struct inode *inode,
				    struct extent_status *newes)
{
	struct extent_status es;
	ext4_lblk_t block, next_del;

	if (newes->es_pblk == 0) {
		ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
				newes->es_lblk + newes->es_len - 1, &es);

		/*
		 * No extent in the extent tree contains block
		 * @newes->es_lblk, so the block may lie in 1) a hole or
		 * 2) a delayed extent.
		 */
		if (es.es_len == 0)
			/* A hole found. */
			return 0;

		if (es.es_lblk > newes->es_lblk) {
			/* A hole found. */
			newes->es_len = min(es.es_lblk - newes->es_lblk,
					    newes->es_len);
			return 0;
		}

		newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
	}

	block = newes->es_lblk + newes->es_len;
	ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
	if (es.es_len == 0)
		next_del = EXT_MAX_BLOCKS;
	else
		next_del = es.es_lblk;

	return next_del;
}
/* fiemap flags we can handle specified here */
#define EXT4_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
static int ext4_xattr_fiemap(struct inode *inode,
			     struct fiemap_extent_info *fieinfo)
{
	__u64 physical = 0;
	__u64 length;
	__u32 flags = FIEMAP_EXTENT_LAST;
	int blockbits = inode->i_sb->s_blocksize_bits;
	int error = 0;

	/* in-inode? */
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		struct ext4_iloc iloc;
		int offset;	/* offset of xattr in inode */

		error = ext4_get_inode_loc(inode, &iloc);
		if (error)
			return error;
		physical = (__u64)iloc.bh->b_blocknr << blockbits;
		offset = EXT4_GOOD_OLD_INODE_SIZE +
				EXT4_I(inode)->i_extra_isize;
		physical += offset;
		length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
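		/*
		 * Illustrative numbers (typical mkfs defaults, not fixed by
		 * this code): with a 256-byte on-disk inode and
		 * i_extra_isize = 32, the xattr area starts at
		 * 128 + 32 = 160 bytes into the inode and runs for
		 * 256 - 160 = 96 bytes.
		 */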
		flags |= FIEMAP_EXTENT_DATA_INLINE;
		brelse(iloc.bh);
	} else { /* external block */
		physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
		length = inode->i_sb->s_blocksize;
	}

	if (physical)
		error = fiemap_fill_next_extent(fieinfo, 0, physical,
						length, flags);
	return (error < 0 ? error : 0);
}
int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		__u64 start, __u64 len)
{
	ext4_lblk_t start_blk;
	int error = 0;

	if (ext4_has_inline_data(inode)) {
		int has_inline = 1;

		error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);

		if (has_inline)
			return error;
	}
	/* fallback to generic here if not in extents fmt */
	if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return generic_block_fiemap(inode, fieinfo, start, len,
					    ext4_get_block);

	if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
		return -EBADR;
	if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
		error = ext4_xattr_fiemap(inode, fieinfo);
	} else {
		ext4_lblk_t len_blks;
		__u64 last_blk;

		start_blk = start >> inode->i_sb->s_blocksize_bits;
		last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
		if (last_blk >= EXT_MAX_BLOCKS)
			last_blk = EXT_MAX_BLOCKS-1;
		len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
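		/*
		 * Example: a FIEMAP request with start = 0 and len = ~0ULL
		 * clamps last_blk to EXT_MAX_BLOCKS - 1, keeping len_blks
		 * within the 32-bit ext4_lblk_t range.
		 */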
		/*
		 * Walk the extent tree gathering extent information
		 * and pushing extents back to the user.
		 */
		error = ext4_fill_fiemap_extents(inode, start_blk,
						 len_blks, fieinfo);