/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
#include "xattr.h"

#include <trace/events/ext4.h>

/*
 * used by extent splitting.
 */
#define EXT4_EXT_MAY_ZEROOUT	0x1  /* safe to zeroout if split fails \
					due to ENOSPC */
#define EXT4_EXT_MARK_UNINIT1	0x2  /* mark first half uninitialized */
#define EXT4_EXT_MARK_UNINIT2	0x4  /* mark second half uninitialized */

#define EXT4_EXT_DATA_VALID1	0x8  /* first half contains valid data */
#define EXT4_EXT_DATA_VALID2	0x10 /* second half contains valid data */

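/*
 * Extent block checksumming: with the metadata_csum feature enabled,
 * each extent tree block carries an ext4_extent_tail whose checksum
 * covers the block's header and entries, seeded by the per-inode
 * checksum seed.  The helpers below compute, verify, and stamp it.
 */
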
static __le32 ext4_extent_block_csum(struct inode *inode,
				     struct ext4_extent_header *eh)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;

	csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
			   EXT4_EXTENT_TAIL_OFFSET(eh));
	return cpu_to_le32(csum);
}

static int ext4_extent_block_csum_verify(struct inode *inode,
					 struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return 1;

	et = find_ext4_extent_tail(eh);
	if (et->et_checksum != ext4_extent_block_csum(inode, eh))
		return 0;
	return 1;
}

static void ext4_extent_block_csum_set(struct inode *inode,
				       struct ext4_extent_header *eh)
{
	struct ext4_extent_tail *et;

	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	et = find_ext4_extent_tail(eh);
	et->et_checksum = ext4_extent_block_csum(inode, eh);
}

static int ext4_split_extent(handle_t *handle,
				struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_map_blocks *map,
				int split_flag,
				int flags);

static int ext4_split_extent_at(handle_t *handle,
			     struct inode *inode,
			     struct ext4_ext_path *path,
			     ext4_lblk_t split,
			     int split_flag,
			     int flags);

static int ext4_find_delayed_extent(struct inode *inode,
				    struct extent_status *newes);

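/*
 * Make sure 'handle' carries at least 'needed' credits.  If it does
 * not, try to extend the transaction in place; if that fails, restart
 * it via ext4_truncate_restart_trans() and return -EAGAIN so the
 * caller knows any tree paths it holds must be revalidated.
 */
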
static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	if (err == 0)
		err = -EAGAIN;

	return err;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
int __ext4_ext_dirty(const char *where, unsigned int line, handle_t *handle,
		     struct inode *inode, struct ext4_ext_path *path)
{
	int err;

	if (path->p_bh) {
		ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
		/* path points to block */
		err = __ext4_handle_dirty_metadata(where, line, handle,
						   inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}

static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
			      struct ext4_ext_path *path,
			      ext4_lblk_t block)
{
	if (path) {
		int depth = path->p_depth;
		struct ext4_extent *ex;

		/*
		 * Try to predict block placement assuming that we are
		 * filling in a file which will eventually be
		 * non-sparse --- i.e., in the case of libbfd writing
		 * an ELF object sections out-of-order but in a way
		 * that eventually results in a contiguous object or
		 * executable file, or some database extending a table
		 * space file.  However, this is actually somewhat
		 * non-ideal if we are writing a sparse file such as
		 * qemu or KVM writing a raw image file that is going
		 * to stay fairly sparse, since it will end up
		 * fragmenting the file system's free space.  Maybe we
		 * should have some heuristics or some way to allow
		 * userspace to pass a hint to file system,
		 * especially if the latter case turns out to be
		 * common.
		 */
		ex = path[depth].p_ext;
		if (ex) {
			ext4_fsblk_t ext_pblk = ext4_ext_pblock(ex);
			ext4_lblk_t ext_block = le32_to_cpu(ex->ee_block);

			if (block > ext_block)
				return ext_pblk + (block - ext_block);
			else
				return ext_pblk - (ext_block - block);
		}

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	return ext4_inode_to_goal_block(inode);
}

/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err, unsigned int flags)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
					NULL, err);
	return newblock;
}

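/*
 * The four helpers below return how many entries fit in one node of
 * the extent tree: leaf (extent) vs. index entries, stored in an
 * on-disk block vs. in the inode's i_data root.  With AGGRESSIVE_TEST
 * defined the capacity is artificially clamped so that deep trees can
 * be exercised on small files; callers passing check == 1 still get
 * the true capacity.
 */
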
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGGRESSIVE_TEST
	if (!check && size > 4)
		size = 4;
#endif
	return size;
}

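/*
 * Worked example for ext4_ext_calc_metadata_amount() below (assuming
 * the usual 12-byte on-disk ext4_extent_header and ext4_extent_idx):
 * with a 4KB block size, idxs = (4096 - 12) / 12 = 340, so a run of
 * contiguous delayed allocations is charged one extra index block
 * every 340 leaf blocks, a second one every 340^2, and a third one
 * every 340^3.
 */
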
/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		int num = 0;

		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}

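/*
 * Return the number of entries a node at the given depth may legally
 * hold: the root lives in the inode body and is smaller than a disk
 * block, and leaves (depth 0) hold extents while interior nodes hold
 * indexes.
 */
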
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}

static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext4_ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	if (len == 0)
		return 0;
	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = ext4_idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}

static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}

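/*
 * Sanity-check an extent tree node before trusting it: magic, depth,
 * entry count against the per-node maximum, the entries themselves,
 * and (for non-root nodes) the block checksum.  On failure the inode
 * is flagged via ext4_error_inode() and -EIO is returned.
 */
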
static int __ext4_ext_check(const char *function, unsigned int line,
			    struct inode *inode, struct ext4_extent_header *eh,
			    int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	/* Verify checksum on non-root extent tree nodes */
	if (ext_depth(inode) != depth &&
	    !ext4_extent_block_csum_verify(inode, eh)) {
		error_msg = "extent tree corrupted";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error_inode(inode, function, line, 0,
			"bad header/extent: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, __LINE__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}

static int __ext4_ext_check_block(const char *function, unsigned int line,
				  struct inode *inode,
				  struct ext4_extent_header *eh,
				  int depth,
				  struct buffer_head *bh)
{
	int ret;

	if (buffer_verified(bh))
		return 0;
	ret = ext4_ext_check(inode, eh, depth);
	if (ret)
		return ret;
	set_buffer_verified(bh);
	return ret;
}

#define ext4_ext_check_block(inode, eh, depth, bh)	\
	__ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)

#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu",
				  le32_to_cpu(path->p_idx->ei_block),
				  ext4_idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext4_ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
	}
	ext_debug("\n");
}

static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
			ext4_fsblk_t newblock, int level)
{
	int depth = ext_depth(inode);
	struct ext4_extent *ex;

	if (depth != level) {
		struct ext4_extent_idx *idx;
		idx = path[level].p_idx;
		while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", level,
					le32_to_cpu(idx->ei_block),
					ext4_idx_pblock(idx),
					newblock);
			idx++;
		}

		return;
	}

	ex = path[depth].p_ext;
	while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(ex->ee_block),
				ext4_ext_pblock(ex),
				ext4_ext_is_uninitialized(ex),
				ext4_ext_get_actual_len(ex),
				newblock);
		ex++;
	}
}

#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#define ext4_ext_show_move(inode, path, newblock, level)
#endif

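/*
 * Drop the buffer_head references taken while walking the tree; the
 * path array itself is left for the caller to reuse or kfree().
 */
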
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}

/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  ext4_idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif
}

/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext4_ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif
}

int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	return 0;
}

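/*
 * Walk the tree from the root down to the leaf covering 'block',
 * recording one ext4_ext_path element per level (header, buffer_head,
 * and the index/extent chosen by binary search).  If 'path' is NULL a
 * fresh array sized for depth + 2 levels is allocated; callers must
 * release it with ext4_ext_drop_refs() and kfree() when done.
 */
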
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;
	int ret;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh)) {
			ret = -ENOMEM;
			goto err;
		}
		if (!bh_uptodate_or_lock(bh)) {
			trace_ext4_ext_load_extent(inode, block,
						path[ppos].p_block);
			ret = bh_submit_read(bh);
			if (ret < 0) {
				put_bh(bh);
				goto err;
			}
		}
		eh = ext_block_hdr(bh);
		ppos++;
		if (unlikely(ppos > depth)) {
			put_bh(bh);
			EXT4_ERROR_INODE(inode,
					 "ppos %d > depth %d", ppos, depth);
			ret = -EIO;
			goto err;
		}
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		ret = ext4_ext_check_block(inode, eh, i, bh);
		if (ret < 0)
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(ret);
}

/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				 struct ext4_ext_path *curp,
				 int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	if (unlikely(logical == le32_to_cpu(curp->p_idx->ei_block))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d == ei_block %d!",
				 logical, le32_to_cpu(curp->p_idx->ei_block));
		return -EIO;
	}

	if (unlikely(le16_to_cpu(curp->p_hdr->eh_entries)
			     >= le16_to_cpu(curp->p_hdr->eh_max))) {
		EXT4_ERROR_INODE(inode,
				 "eh_entries %d >= eh_max %d!",
				 le16_to_cpu(curp->p_hdr->eh_entries),
				 le16_to_cpu(curp->p_hdr->eh_max));
		return -EIO;
	}

	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		ext_debug("insert new index %d after: %llu\n", logical, ptr);
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		ext_debug("insert new index %d before: %llu\n", logical, ptr);
		ix = curp->p_idx;
	}

	len = EXT_LAST_INDEX(curp->p_hdr) - ix + 1;
	BUG_ON(len < 0);
	if (len > 0) {
		ext_debug("insert new index %d: "
				"move %d indices from 0x%p to 0x%p\n",
				logical, len, ix, ix + 1);
		memmove(ix + 1, ix, len * sizeof(struct ext4_extent_idx));
	}

	if (unlikely(ix > EXT_MAX_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_MAX_INDEX!");
		return -EIO;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	if (unlikely(ix > EXT_LAST_INDEX(curp->p_hdr))) {
		EXT4_ERROR_INODE(inode, "ix > EXT_LAST_INDEX!");
		return -EIO;
	}

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}

/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
			  unsigned int flags,
			  struct ext4_ext_path *path,
			  struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	if (unlikely(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr))) {
		EXT4_ERROR_INODE(inode, "p_ext > EXT_MAX_EXTENT!");
		return -EIO;
	}
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				  le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err, flags);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	if (unlikely(newblock == 0)) {
		EXT4_ERROR_INODE(inode, "newblock == 0!");
		err = -EIO;
		goto cleanup;
	}
	bh = sb_getblk(inode->i_sb, newblock);
	if (unlikely(!bh)) {
		err = -ENOMEM;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;

	/* move remainder of path[depth] to the new leaf */
	if (unlikely(path[depth].p_hdr->eh_entries !=
		     path[depth].p_hdr->eh_max)) {
		EXT4_ERROR_INODE(inode, "eh_entries %d != eh_max %d!",
				 path[depth].p_hdr->eh_entries,
				 path[depth].p_hdr->eh_max);
		err = -EIO;
		goto cleanup;
	}
	/* start copy from next extent */
	m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++;
	ext4_ext_show_move(inode, path, newblock, depth);
	if (m) {
		struct ext4_extent *ex;
		ex = EXT_FIRST_EXTENT(neh);
		memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;
	}

	/* create intermediate indexes */
	k = depth - at - 1;
	if (unlikely(k < 0)) {
		EXT4_ERROR_INODE(inode, "k %d < 0!", k);
		err = -EIO;
		goto cleanup;
	}
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (unlikely(!bh)) {
			err = -ENOMEM;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);

		/* move remainder of path[i] to the new index block */
		if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) !=
					EXT_LAST_INDEX(path[i].p_hdr))) {
			EXT4_ERROR_INODE(inode,
					 "EXT_MAX_INDEX != EXT_LAST_INDEX ee_block %d!",
					 le32_to_cpu(path[i].p_ext->ee_block));
			err = -EIO;
			goto cleanup;
		}
		/* start copy indexes */
		m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++;
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		ext4_ext_show_move(inode, path, newblock, i);
		if (m) {
			memmove(++fidx, path[i].p_idx,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		ext4_extent_block_csum_set(inode, neh);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
					 EXT4_FREE_BLOCKS_METADATA);
		}
	}
	kfree(ablocks);

	return err;
}

/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
				 unsigned int flags,
				 struct ext4_extent *newext)
{
	struct ext4_extent_header *neh;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, NULL,
		newext, &err, flags);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (unlikely(!bh))
		return -ENOMEM;
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, EXT4_I(inode)->i_data,
		sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	ext4_extent_block_csum_set(inode, neh);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* Update top-level index: num,max,pointer */
	neh = ext_inode_hdr(inode);
	neh->eh_entries = cpu_to_le16(1);
	ext4_idx_store_pblock(EXT_FIRST_INDEX(neh), newblock);
	if (neh->eh_depth == 0) {
		/* Root extent block becomes index block */
		neh->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
		EXT_FIRST_INDEX(neh)->ei_block =
			EXT_FIRST_EXTENT(neh)->ee_block;
	}
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
		  ext4_idx_pblock(EXT_FIRST_INDEX(neh)));

	le16_add_cpu(&neh->eh_depth, 1);
	ext4_mark_inode_dirty(handle, inode);
out:
	brelse(bh);

	return err;
}

/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
				    unsigned int flags,
				    struct ext4_ext_path *path,
				    struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, flags, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, flags, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				   (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}

/*
 * search the closest allocated block to the left for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_left(struct inode *inode,
				struct ext4_ext_path *path,
				ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "EXT_FIRST_EXTENT != ex *logical %d ee_block %d!",
					 *logical, le32_to_cpu(ex->ee_block));
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
				  "ix (%d) != EXT_FIRST_INDEX (%d) (depth %d)!",
				  ix != NULL ? le32_to_cpu(ix->ei_block) : 0,
				  EXT_FIRST_INDEX(path[depth].p_hdr) != NULL ?
		le32_to_cpu(EXT_FIRST_INDEX(path[depth].p_hdr)->ei_block) : 0,
				  depth);
				return -EIO;
			}
		}
		return 0;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext4_ext_pblock(ex) + ee_len - 1;
	return 0;
}

/*
 * search the closest allocated block to the right for *logical
 * and returns it at @logical + its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
static int ext4_ext_search_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 ext4_lblk_t *logical, ext4_fsblk_t *phys,
				 struct ext4_extent **ret_ex)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	if (unlikely(path == NULL)) {
		EXT4_ERROR_INODE(inode, "path == NULL *logical %d!", *logical);
		return -EIO;
	}
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		if (unlikely(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex)) {
			EXT4_ERROR_INODE(inode,
					 "first_extent(path[%d].p_hdr) != ex",
					 depth);
			return -EIO;
		}
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			if (unlikely(ix != EXT_FIRST_INDEX(path[depth].p_hdr))) {
				EXT4_ERROR_INODE(inode,
						 "ix != EXT_FIRST_INDEX *logical %d!",
						 *logical);
				return -EIO;
			}
		}
		goto found_extent;
	}

	if (unlikely(*logical < (le32_to_cpu(ex->ee_block) + ee_len))) {
		EXT4_ERROR_INODE(inode,
				 "logical %d < ee_block %d + ee_len %d!",
				 *logical, le32_to_cpu(ex->ee_block), ee_len);
		return -EIO;
	}

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		goto found_extent;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = ext4_idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check_block(inode, eh,
					 path->p_depth - depth, bh)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = ext4_idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
found_extent:
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext4_ext_pblock(ex);
	*ret_ex = ex;
	if (bh)
		put_bh(bh);
	return 0;
}

/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCKS.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCKS;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext &&
				path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCKS
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* a zero-depth tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCKS;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCKS;
}

/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;

	if (unlikely(ex == NULL || eh == NULL)) {
		EXT4_ERROR_INODE(inode,
				 "ex %p == NULL or eh %p == NULL", ex, eh);
		return -EIO;
	}

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}

static int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that both extents are initialized. We don't merge
	 * uninitialized extents so that we can be sure that end_io code has
	 * the extent that was written properly split out and conversion to
	 * initialized is trivial.
	 */
	if (ext4_ext_is_uninitialized(ex1) || ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext4_ext_pblock(ex1) + ext1_ee_len == ext4_ext_pblock(ex2))
		return 1;
	return 0;
}

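/*
 * Background for the max_len check above: ee_len is 16 bits and its
 * top bit flags an extent as uninitialized, so an initialized extent
 * may hold up to EXT_INIT_MAX_LEN blocks and an uninitialized one may
 * hold one block less (EXT_UNINIT_MAX_LEN).  Letting a merge carry
 * the length into the top bit would silently flip the extent's state.
 */
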
/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
static int ext4_ext_try_to_merge_right(struct inode *inode,
				 struct ext4_ext_path *path,
				 struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			EXT4_ERROR_INODE(inode, "eh->eh_entries = 0!");
	}

	return merge_done;
}

/*
 * This function does a very simple check to see if we can collapse
 * an extent tree with a single extent tree leaf block into the inode.
 */
static void ext4_ext_try_to_merge_up(handle_t *handle,
				     struct inode *inode,
				     struct ext4_ext_path *path)
{
	size_t s;
	unsigned max_root = ext4_ext_space_root(inode, 0);
	ext4_fsblk_t blk;

	if ((path[0].p_depth != 1) ||
	    (le16_to_cpu(path[0].p_hdr->eh_entries) != 1) ||
	    (le16_to_cpu(path[1].p_hdr->eh_entries) > max_root))
		return;

	/*
	 * We need to modify the block allocation bitmap and the block
	 * group descriptor to release the extent tree block.  If we
	 * can't get the journal credits, give up.
	 */
	if (ext4_journal_extend(handle, 2))
		return;

	/*
	 * Copy the extent data up to the inode
	 */
	blk = ext4_idx_pblock(path[0].p_idx);
	s = le16_to_cpu(path[1].p_hdr->eh_entries) *
		sizeof(struct ext4_extent_idx);
	s += sizeof(struct ext4_extent_header);

	memcpy(path[0].p_hdr, path[1].p_hdr, s);
	path[0].p_depth = 0;
	path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
		(path[1].p_ext - EXT_FIRST_EXTENT(path[1].p_hdr));
	path[0].p_hdr->eh_max = cpu_to_le16(max_root);

	brelse(path[1].p_bh);
	ext4_free_blocks(handle, inode, NULL, blk, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
}

/*
 * This function tries to merge the @ex extent to neighbours in the tree,
 * trying the left neighbour first and, failing that, the right one; it
 * then tries to collapse a single-leaf tree up into the inode.
 */
static void ext4_ext_try_to_merge(handle_t *handle,
				  struct inode *inode,
				  struct ext4_ext_path *path,
				  struct ext4_extent *ex) {
	struct ext4_extent_header *eh;
	unsigned int depth;
	int merge_done = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	if (ex > EXT_FIRST_EXTENT(eh))
		merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1);

	if (!merge_done)
		(void) ext4_ext_try_to_merge_right(inode, path, ex);

	ext4_ext_try_to_merge_up(handle, inode, path);
}

/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
					   struct inode *inode,
					   struct ext4_extent *newext,
					   struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);
	b2 &= ~(sbi->s_cluster_ratio - 1);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCKS)
			goto out;
		b2 &= ~(sbi->s_cluster_ratio - 1);
	}

	/* check for wrap through zero on extent logical start block*/
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCKS - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}

/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int flag)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;
	int flags = 0;

	if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
		EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
		return -EIO;
	}
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	eh = path[depth].p_hdr;
	if (unlikely(path[depth].p_hdr == NULL)) {
		EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
		return -EIO;
	}

	/* try to insert block into found extent and return */
	if (ex && !(flag & EXT4_GET_BLOCKS_PRE_IO)) {

		/*
		 * Try to see whether we should rather test the extent on
		 * right from ex, or from the left of ex. This is because
		 * ext4_ext_find_extent() can return either extent on the
		 * left, or on the right from the searched position. This
		 * will make merging more effective.
		 */
		if (ex < EXT_LAST_EXTENT(eh) &&
		    (le32_to_cpu(ex->ee_block) +
		    ext4_ext_get_actual_len(ex) <
		    le32_to_cpu(newext->ee_block))) {
			ex += 1;
			goto prepend;
		} else if ((ex > EXT_FIRST_EXTENT(eh)) &&
			   (le32_to_cpu(newext->ee_block) +
			   ext4_ext_get_actual_len(newext) <
			   le32_to_cpu(ex->ee_block)))
			ex -= 1;

		/* Try to append newex to the ex */
		if (ext4_can_extents_be_merged(inode, ex, newext)) {
			ext_debug("append [%d]%d block to %u:[%d]%d"
				  "(from %llu)\n",
				  ext4_ext_is_uninitialized(newext),
				  ext4_ext_get_actual_len(newext),
				  le32_to_cpu(ex->ee_block),
				  ext4_ext_is_uninitialized(ex),
				  ext4_ext_get_actual_len(ex),
				  ext4_ext_pblock(ex));
			err = ext4_ext_get_access(handle, inode,
						  path + depth);
			if (err)
				return err;

			/*
			 * ext4_can_extents_be_merged should have checked
			 * that either both extents are uninitialized, or
			 * both aren't. Thus we need to check only one of
			 * them here.
			 */
			if (ext4_ext_is_uninitialized(ex))
				uninitialized = 1;
			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
			if (uninitialized)
				ext4_ext_mark_uninitialized(ex);
			eh = path[depth].p_hdr;
			nearex = ex;
			goto merge;
		}

prepend:
		/* Try to prepend newex to the ex */
		if (ext4_can_extents_be_merged(inode, newext, ex)) {
			ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
				  "(from %llu)\n",
				  le32_to_cpu(newext->ee_block),
				  ext4_ext_is_uninitialized(newext),
				  ext4_ext_get_actual_len(newext),
				  le32_to_cpu(ex->ee_block),
				  ext4_ext_is_uninitialized(ex),
				  ext4_ext_get_actual_len(ex),
				  ext4_ext_pblock(ex));
			err = ext4_ext_get_access(handle, inode,
						  path + depth);
			if (err)
				return err;

			/*
			 * ext4_can_extents_be_merged should have checked
			 * that either both extents are uninitialized, or
			 * both aren't. Thus we need to check only one of
			 * them here.
			 */
			if (ext4_ext_is_uninitialized(ex))
				uninitialized = 1;
			ex->ee_block = newext->ee_block;
			ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
			ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
			if (uninitialized)
				ext4_ext_mark_uninitialized(ex);
			eh = path[depth].p_hdr;
			nearex = ex;
			goto merge;
		}
	}

	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = EXT_MAX_BLOCKS;
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block))
		next = ext4_ext_next_leaf_block(path);
	if (next != EXT_MAX_BLOCKS) {
		ext_debug("next leaf block - %u\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto has_space;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	if (flag & EXT4_GET_BLOCKS_METADATA_NOFAIL)
		flags = EXT4_MB_USE_RESERVED;
	err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
				le32_to_cpu(newext->ee_block),
				ext4_ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext));
		nearex = EXT_FIRST_EXTENT(eh);
	} else {
		if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
			/* Insert after */
			ext_debug("insert %u:%llu:[%d]%d after: "
					"nearest %p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					nearex);
			nearex++;
		} else {
			/* Insert before */
			BUG_ON(newext->ee_block == nearex->ee_block);
			ext_debug("insert %u:%llu:[%d]%d before: "
					"nearest %p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					nearex);
		}
		len = EXT_LAST_EXTENT(eh) - nearex + 1;
		if (len > 0) {
			ext_debug("insert %u:%llu:[%d]%d: "
					"move %d extents from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext4_ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					len, nearex, nearex + 1);
			memmove(nearex + 1, nearex,
				len * sizeof(struct ext4_extent));
		}
	}

	le16_add_cpu(&eh->eh_entries, 1);
	path[depth].p_ext = nearex;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext4_ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents */
	if (!(flag & EXT4_GET_BLOCKS_PRE_IO))
		ext4_ext_try_to_merge(handle, inode, path, nearex);

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + path->p_depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	return err;
}

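/*
 * Walk [block, block + num) and report mapped and delayed ranges to
 * the fiemap caller.  For each stretch the extent tree is consulted
 * under i_data_sem, the hole or extent found is turned into an
 * extent_status record, and ext4_find_delayed_extent() is asked
 * whether delayed pages overlap it before fiemap_fill_next_extent()
 * emits the result.
 */
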
2029 static int ext4_fill_fiemap_extents(struct inode *inode,
2030 ext4_lblk_t block, ext4_lblk_t num,
2031 struct fiemap_extent_info *fieinfo)
2033 struct ext4_ext_path *path = NULL;
2034 struct ext4_extent *ex;
2035 struct extent_status es;
2036 ext4_lblk_t next, next_del, start = 0, end = 0;
2037 ext4_lblk_t last = block + num;
2038 int exists, depth = 0, err = 0;
2039 unsigned int flags = 0;
2040 unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
2042 while (block < last && block != EXT_MAX_BLOCKS) {
2043 num = last - block;
2044 /* find extent for this block */
2045 down_read(&EXT4_I(inode)->i_data_sem);
2047 if (path && ext_depth(inode) != depth) {
2048 /* depth was changed. we have to realloc path */
2049 kfree(path);
2050 path = NULL;
2053 path = ext4_ext_find_extent(inode, block, path);
2054 if (IS_ERR(path)) {
2055 up_read(&EXT4_I(inode)->i_data_sem);
2056 err = PTR_ERR(path);
2057 path = NULL;
2058 break;
2061 depth = ext_depth(inode);
2062 if (unlikely(path[depth].p_hdr == NULL)) {
2063 up_read(&EXT4_I(inode)->i_data_sem);
2064 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2065 err = -EIO;
2066 break;
2068 ex = path[depth].p_ext;
2069 next = ext4_ext_next_allocated_block(path);
2070 ext4_ext_drop_refs(path);
2072 flags = 0;
2073 exists = 0;
2074 if (!ex) {
2075 /* there is no extent yet, so try to allocate
2076 * all requested space */
2077 start = block;
2078 end = block + num;
2079 } else if (le32_to_cpu(ex->ee_block) > block) {
2080 /* need to allocate space before found extent */
2081 start = block;
2082 end = le32_to_cpu(ex->ee_block);
2083 if (block + num < end)
2084 end = block + num;
2085 } else if (block >= le32_to_cpu(ex->ee_block)
2086 + ext4_ext_get_actual_len(ex)) {
2087 /* need to allocate space after found extent */
2088 start = block;
2089 end = block + num;
2090 if (end >= next)
2091 end = next;
2092 } else if (block >= le32_to_cpu(ex->ee_block)) {
2094 * some part of requested space is covered
2095 * by found extent
2097 start = block;
2098 end = le32_to_cpu(ex->ee_block)
2099 + ext4_ext_get_actual_len(ex);
2100 if (block + num < end)
2101 end = block + num;
2102 exists = 1;
2103 } else {
2104 BUG();
2106 BUG_ON(end <= start);
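/*
 * Build a status extent for the current range: a hole
 * (es_pblk == 0) if no on-disk extent covers it, otherwise a
 * copy of the extent we found.
 */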
2108 if (!exists) {
2109 es.es_lblk = start;
2110 es.es_len = end - start;
2111 es.es_pblk = 0;
2112 } else {
2113 es.es_lblk = le32_to_cpu(ex->ee_block);
2114 es.es_len = ext4_ext_get_actual_len(ex);
2115 es.es_pblk = ext4_ext_pblock(ex);
2116 if (ext4_ext_is_uninitialized(ex))
2117 flags |= FIEMAP_EXTENT_UNWRITTEN;
2121 * Find delayed extent and update es accordingly. We call
2122 * it even in !exists case to find out whether es is the
2123 * last existing extent or not.
2125 next_del = ext4_find_delayed_extent(inode, &es);
2126 if (!exists && next_del) {
2127 exists = 1;
2128 flags |= FIEMAP_EXTENT_DELALLOC;
2130 up_read(&EXT4_I(inode)->i_data_sem);
2132 if (unlikely(es.es_len == 0)) {
2133 EXT4_ERROR_INODE(inode, "es.es_len == 0");
2134 err = -EIO;
2135 break;
2139 * This is possible iff next == next_del == EXT_MAX_BLOCKS.
2140 * We need to check next == EXT_MAX_BLOCKS because an extent
2141 * can carry both unwritten and delayed status: when a delayed
2142 * allocated extent is later allocated by fallocate, the status
2143 * tree tracks both states in a single extent.
2144 *
2146 * So we could return an unwritten and delayed extent whose
2147 * block is equal to 'next'.
2149 if (next == next_del && next == EXT_MAX_BLOCKS) {
2150 flags |= FIEMAP_EXTENT_LAST;
2151 if (unlikely(next_del != EXT_MAX_BLOCKS ||
2152 next != EXT_MAX_BLOCKS)) {
2153 EXT4_ERROR_INODE(inode,
2154 "next extent == %u, next "
2155 "delalloc extent = %u",
2156 next, next_del);
2157 err = -EIO;
2158 break;
2162 if (exists) {
2163 err = fiemap_fill_next_extent(fieinfo,
2164 (__u64)es.es_lblk << blksize_bits,
2165 (__u64)es.es_pblk << blksize_bits,
2166 (__u64)es.es_len << blksize_bits,
2167 flags);
2168 if (err < 0)
2169 break;
2170 if (err == 1) {
2171 err = 0;
2172 break;
2176 block = es.es_lblk + es.es_len;
2179 if (path) {
2180 ext4_ext_drop_refs(path);
2181 kfree(path);
2184 return err;
2188 * ext4_ext_put_gap_in_cache:
2189 * calculate boundaries of the gap that the requested block fits into
2190 * and cache this gap
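 * The gap is stored in the extent status tree as a hole so that
 * later lookups can be answered without re-walking the tree
 * (unless delalloc blocks live inside the range).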
2192 static void
2193 ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2194 ext4_lblk_t block)
2196 int depth = ext_depth(inode);
2197 unsigned long len;
2198 ext4_lblk_t lblock;
2199 struct ext4_extent *ex;
2201 ex = path[depth].p_ext;
2202 if (ex == NULL) {
2204 * there is no extent yet, so gap is [0;-] and we
2205 * don't cache it
2207 ext_debug("cache gap(whole file):");
2208 } else if (block < le32_to_cpu(ex->ee_block)) {
2209 lblock = block;
2210 len = le32_to_cpu(ex->ee_block) - block;
2211 ext_debug("cache gap(before): %u [%u:%u]",
2212 block,
2213 le32_to_cpu(ex->ee_block),
2214 ext4_ext_get_actual_len(ex));
2215 if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2216 ext4_es_insert_extent(inode, lblock, len, ~0,
2217 EXTENT_STATUS_HOLE);
2218 } else if (block >= le32_to_cpu(ex->ee_block)
2219 + ext4_ext_get_actual_len(ex)) {
2220 ext4_lblk_t next;
2221 lblock = le32_to_cpu(ex->ee_block)
2222 + ext4_ext_get_actual_len(ex);
2224 next = ext4_ext_next_allocated_block(path);
2225 ext_debug("cache gap(after): [%u:%u] %u",
2226 le32_to_cpu(ex->ee_block),
2227 ext4_ext_get_actual_len(ex),
2228 block);
2229 BUG_ON(next == lblock);
2230 len = next - lblock;
2231 if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2232 ext4_es_insert_extent(inode, lblock, len, ~0,
2233 EXTENT_STATUS_HOLE);
2234 } else {
2235 lblock = len = 0;
2236 BUG();
2239 ext_debug(" -> %u:%lu\n", lblock, len);
2243 * ext4_ext_rm_idx:
2244 * removes index from the index block.
2246 static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2247 struct ext4_ext_path *path, int depth)
2249 int err;
2250 ext4_fsblk_t leaf;
2252 /* free index block */
2253 depth--;
2254 path = path + depth;
2255 leaf = ext4_idx_pblock(path->p_idx);
2256 if (unlikely(path->p_hdr->eh_entries == 0)) {
2257 EXT4_ERROR_INODE(inode, "path->p_hdr->eh_entries == 0");
2258 return -EIO;
2260 err = ext4_ext_get_access(handle, inode, path);
2261 if (err)
2262 return err;
2264 if (path->p_idx != EXT_LAST_INDEX(path->p_hdr)) {
2265 int len = EXT_LAST_INDEX(path->p_hdr) - path->p_idx;
2266 len *= sizeof(struct ext4_extent_idx);
2267 memmove(path->p_idx, path->p_idx + 1, len);
2270 le16_add_cpu(&path->p_hdr->eh_entries, -1);
2271 err = ext4_ext_dirty(handle, inode, path);
2272 if (err)
2273 return err;
2274 ext_debug("index is empty, remove it, free block %llu\n", leaf);
2275 trace_ext4_ext_rm_idx(inode, leaf);
2277 ext4_free_blocks(handle, inode, NULL, leaf, 1,
2278 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
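/*
 * If we removed the first index in a block, the key stored in
 * the parent is now stale; propagate the new first key towards
 * the root for as long as we sit in the first slot of each level.
 */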
2280 while (--depth >= 0) {
2281 if (path->p_idx != EXT_FIRST_INDEX(path->p_hdr))
2282 break;
2283 path--;
2284 err = ext4_ext_get_access(handle, inode, path);
2285 if (err)
2286 break;
2287 path->p_idx->ei_block = (path+1)->p_idx->ei_block;
2288 err = ext4_ext_dirty(handle, inode, path);
2289 if (err)
2290 break;
2292 return err;
2296 * ext4_ext_calc_credits_for_single_extent:
2297 * This routine returns the maximum number of credits needed to insert
2298 * an extent into the extent tree.
2299 * When passing the actual path, the caller should calculate the
2300 * credits under i_data_sem.
2302 int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
2303 struct ext4_ext_path *path)
2305 if (path) {
2306 int depth = ext_depth(inode);
2307 int ret = 0;
2309 /* probably there is space in leaf? */
2310 if (le16_to_cpu(path[depth].p_hdr->eh_entries)
2311 < le16_to_cpu(path[depth].p_hdr->eh_max)) {
2314 * There is some space in the leaf, so no
2315 * need to account for the leaf block credit;
2317 * bitmaps, block group descriptor blocks
2318 * and other metadata blocks still need to be
2319 * accounted for.
2321 /* 1 bitmap, 1 block group descriptor */
2322 ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
2323 return ret;
2327 return ext4_chunk_trans_blocks(inode, nrblocks);
2331 * How many index/leaf blocks need to be changed/allocated to add @extents extents?
2333 * If we add a single extent, then in the worst case, each tree level
2334 * index/leaf needs to be changed in case the tree splits.
2336 * If more extents are inserted, they could cause the whole tree to split more
2337 * than once, but this is really rare.
2339 int ext4_ext_index_trans_blocks(struct inode *inode, int extents)
2341 int index;
2342 int depth;
2344 /* If we are converting the inline data, only one block is needed here. */
2345 if (ext4_has_inline_data(inode))
2346 return 1;
2348 depth = ext_depth(inode);
2350 if (extents <= 1)
2351 index = depth * 2;
2352 else
2353 index = depth * 3;
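/*
 * Example: with a tree of depth 2, a single-extent insert
 * reserves 2 * 2 = 4 index/leaf blocks, while a multi-extent
 * insert reserves 2 * 3 = 6 to allow for one extra full split.
 */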
2355 return index;
2358 static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2359 struct ext4_extent *ex,
2360 long long *partial_cluster,
2361 ext4_lblk_t from, ext4_lblk_t to)
2363 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2364 unsigned short ee_len = ext4_ext_get_actual_len(ex);
2365 ext4_fsblk_t pblk;
2366 int flags = 0;
2368 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2369 flags |= EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET;
2370 else if (ext4_should_journal_data(inode))
2371 flags |= EXT4_FREE_BLOCKS_FORGET;
2374 * For bigalloc file systems, we never free a partial cluster
2375 * at the beginning of the extent. Instead, we make a note
2376 * that we tried freeing the cluster, and check to see if we
2377 * need to free it on a subsequent call to ext4_remove_blocks,
2378 * or at the end of the ext4_truncate() operation.
2380 flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2382 trace_ext4_remove_blocks(inode, ex, from, to, *partial_cluster);
2384 * If we have a partial cluster, and it's different from the
2385 * cluster of the last block, we need to explicitly free the
2386 * partial cluster here.
2388 pblk = ext4_ext_pblock(ex) + ee_len - 1;
2389 if ((*partial_cluster > 0) &&
2390 (EXT4_B2C(sbi, pblk) != *partial_cluster)) {
2391 ext4_free_blocks(handle, inode, NULL,
2392 EXT4_C2B(sbi, *partial_cluster),
2393 sbi->s_cluster_ratio, flags);
2394 *partial_cluster = 0;
2397 #ifdef EXTENTS_STATS
2399 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2400 spin_lock(&sbi->s_ext_stats_lock);
2401 sbi->s_ext_blocks += ee_len;
2402 sbi->s_ext_extents++;
2403 if (ee_len < sbi->s_ext_min)
2404 sbi->s_ext_min = ee_len;
2405 if (ee_len > sbi->s_ext_max)
2406 sbi->s_ext_max = ee_len;
2407 if (ext_depth(inode) > sbi->s_depth_max)
2408 sbi->s_depth_max = ext_depth(inode);
2409 spin_unlock(&sbi->s_ext_stats_lock);
2411 #endif
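/*
 * Only tail removal is handled below: @to must be the last block
 * of the extent. Any other range is reported via ext4_error()
 * as a "strange request".
 */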
2412 if (from >= le32_to_cpu(ex->ee_block)
2413 && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2414 /* tail removal */
2415 ext4_lblk_t num;
2416 unsigned int unaligned;
2418 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2419 pblk = ext4_ext_pblock(ex) + ee_len - num;
2421 * Usually we want to free the partial cluster at the end of the
2422 * extent, except for the situation when the cluster is still
2423 * used by another extent (partial_cluster is negative).
2425 if (*partial_cluster < 0 &&
2426 -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1))
2427 flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
2429 ext_debug("free last %u blocks starting %llu partial %lld\n",
2430 num, pblk, *partial_cluster);
2431 ext4_free_blocks(handle, inode, NULL, pblk, num, flags);
2433 * If the block range to be freed didn't start at the
2434 * beginning of a cluster, and we removed the entire
2435 * extent and the cluster is not used by any other extent,
2436 * save the partial cluster here, since we might need to
2437 * delete it if we determine that the truncate operation has
2438 * removed all of the blocks in the cluster.
2440 * On the other hand, if we did not manage to free the whole
2441 * extent, we have to mark the cluster as used (store negative
2442 * cluster number in partial_cluster).
2444 unaligned = pblk & (sbi->s_cluster_ratio - 1);
2445 if (unaligned && (ee_len == num) &&
2446 (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
2447 *partial_cluster = EXT4_B2C(sbi, pblk);
2448 else if (unaligned)
2449 *partial_cluster = -((long long)EXT4_B2C(sbi, pblk));
2450 else if (*partial_cluster > 0)
2451 *partial_cluster = 0;
2452 } else
2453 ext4_error(sbi->s_sb, "strange request: removal(2) "
2454 "%u-%u from %u:%u\n",
2455 from, to, le32_to_cpu(ex->ee_block), ee_len);
2456 return 0;
2461 * ext4_ext_rm_leaf() Removes the extents associated with the
2462 * blocks appearing between "start" and "end", and splits the extents
2463 * if "start" and "end" appear in the same extent
2465 * @handle: The journal handle
2466 * @inode: The file's inode
2467 * @path: The path to the leaf
2468 * @partial_cluster: The cluster which we'll have to free if all extents
2469 * have been released from it. It gets negative in case
2470 * the cluster is still used.
2471 * @start: The first block to remove
2472 * @end: The last block to remove
2474 static int
2475 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2476 struct ext4_ext_path *path,
2477 long long *partial_cluster,
2478 ext4_lblk_t start, ext4_lblk_t end)
2480 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2481 int err = 0, correct_index = 0;
2482 int depth = ext_depth(inode), credits;
2483 struct ext4_extent_header *eh;
2484 ext4_lblk_t a, b;
2485 unsigned num;
2486 ext4_lblk_t ex_ee_block;
2487 unsigned short ex_ee_len;
2488 unsigned uninitialized = 0;
2489 struct ext4_extent *ex;
2490 ext4_fsblk_t pblk;
2492 /* the header must be checked already in ext4_ext_remove_space() */
2493 ext_debug("truncate since %u in leaf to %u\n", start, end);
2494 if (!path[depth].p_hdr)
2495 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2496 eh = path[depth].p_hdr;
2497 if (unlikely(path[depth].p_hdr == NULL)) {
2498 EXT4_ERROR_INODE(inode, "path[%d].p_hdr == NULL", depth);
2499 return -EIO;
2501 /* find where to start removing */
2502 ex = EXT_LAST_EXTENT(eh);
2504 ex_ee_block = le32_to_cpu(ex->ee_block);
2505 ex_ee_len = ext4_ext_get_actual_len(ex);
2507 trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2509 while (ex >= EXT_FIRST_EXTENT(eh) &&
2510 ex_ee_block + ex_ee_len > start) {
2512 if (ext4_ext_is_uninitialized(ex))
2513 uninitialized = 1;
2514 else
2515 uninitialized = 0;
2517 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2518 uninitialized, ex_ee_len);
2519 path[depth].p_ext = ex;
2521 a = ex_ee_block > start ? ex_ee_block : start;
2522 b = ex_ee_block+ex_ee_len - 1 < end ?
2523 ex_ee_block+ex_ee_len - 1 : end;
2525 ext_debug(" border %u:%u\n", a, b);
2527 /* If this extent is beyond the end of the hole, skip it */
2528 if (end < ex_ee_block) {
2530 * We're going to skip this extent and move to another,
2531 * so if this extent is not cluster aligned we have
2532 * to mark the current cluster as used to avoid
2533 * accidentally freeing it later on
2535 pblk = ext4_ext_pblock(ex);
2536 if (pblk & (sbi->s_cluster_ratio - 1))
2537 *partial_cluster =
2538 -((long long)EXT4_B2C(sbi, pblk));
2539 ex--;
2540 ex_ee_block = le32_to_cpu(ex->ee_block);
2541 ex_ee_len = ext4_ext_get_actual_len(ex);
2542 continue;
2543 } else if (b != ex_ee_block + ex_ee_len - 1) {
2544 EXT4_ERROR_INODE(inode,
2545 "can not handle truncate %u:%u "
2546 "on extent %u:%u",
2547 start, end, ex_ee_block,
2548 ex_ee_block + ex_ee_len - 1);
2549 err = -EIO;
2550 goto out;
2551 } else if (a != ex_ee_block) {
2552 /* remove tail of the extent */
2553 num = a - ex_ee_block;
2554 } else {
2555 /* remove whole extent: excellent! */
2556 num = 0;
2559 * 3 for leaf, sb, and inode plus 2 (bmap and group
2560 * descriptor) for each block group; assume two block
2561 * groups plus ex_ee_len/blocks_per_block_group for
2562 * the worst case
2564 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
2565 if (ex == EXT_FIRST_EXTENT(eh)) {
2566 correct_index = 1;
2567 credits += (ext_depth(inode)) + 1;
2569 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
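/*
 * Make sure the transaction has at least @credits left before
 * modifying this leaf; if not, the handle is extended or the
 * transaction is restarted.
 */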
2571 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2572 if (err)
2573 goto out;
2575 err = ext4_ext_get_access(handle, inode, path + depth);
2576 if (err)
2577 goto out;
2579 err = ext4_remove_blocks(handle, inode, ex, partial_cluster,
2580 a, b);
2581 if (err)
2582 goto out;
2584 if (num == 0)
2585 /* this extent is removed; mark slot entirely unused */
2586 ext4_ext_store_pblock(ex, 0);
2588 ex->ee_len = cpu_to_le16(num);
2590 * Do not mark uninitialized if all the blocks in the
2591 * extent have been removed.
2593 if (uninitialized && num)
2594 ext4_ext_mark_uninitialized(ex);
2596 * If the extent was completely released,
2597 * we need to remove it from the leaf
2599 if (num == 0) {
2600 if (end != EXT_MAX_BLOCKS - 1) {
2602 * For hole punching, we need to scoot all the
2603 * extents up when an extent is removed so that
2604 * we don't have blank extents in the middle
2606 memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) *
2607 sizeof(struct ext4_extent));
2609 /* Now get rid of the one at the end */
2610 memset(EXT_LAST_EXTENT(eh), 0,
2611 sizeof(struct ext4_extent));
2613 le16_add_cpu(&eh->eh_entries, -1);
2614 } else if (*partial_cluster > 0)
2615 *partial_cluster = 0;
2617 err = ext4_ext_dirty(handle, inode, path + depth);
2618 if (err)
2619 goto out;
2621 ext_debug("new extent: %u:%u:%llu\n", ex_ee_block, num,
2622 ext4_ext_pblock(ex));
2623 ex--;
2624 ex_ee_block = le32_to_cpu(ex->ee_block);
2625 ex_ee_len = ext4_ext_get_actual_len(ex);
2628 if (correct_index && eh->eh_entries)
2629 err = ext4_ext_correct_indexes(handle, inode, path);
2632 * Free the partial cluster only if the current extent does not
2633 * reference it. Otherwise we might free a used cluster.
2635 if (*partial_cluster > 0 &&
2636 (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) !=
2637 *partial_cluster)) {
2638 int flags = EXT4_FREE_BLOCKS_FORGET;
2640 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2641 flags |= EXT4_FREE_BLOCKS_METADATA;
2643 ext4_free_blocks(handle, inode, NULL,
2644 EXT4_C2B(sbi, *partial_cluster),
2645 sbi->s_cluster_ratio, flags);
2646 *partial_cluster = 0;
2649 /* if this leaf is free, then we should
2650 * remove it from index block above */
2651 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2652 err = ext4_ext_rm_idx(handle, inode, path, depth);
2654 out:
2655 return err;
2659 * ext4_ext_more_to_rm:
2660 * returns 1 if current index has to be freed (even partial)
2662 static int
2663 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2665 BUG_ON(path->p_idx == NULL);
2667 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2668 return 0;
2671 * if a truncate on a deeper level happened, it wasn't partial,
2672 * so we have to consider the current index for truncation
2674 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2675 return 0;
2676 return 1;
2679 int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2680 ext4_lblk_t end)
2682 struct super_block *sb = inode->i_sb;
2683 int depth = ext_depth(inode);
2684 struct ext4_ext_path *path = NULL;
2685 long long partial_cluster = 0;
2686 handle_t *handle;
2687 int i = 0, err = 0;
2689 ext_debug("truncate since %u to %u\n", start, end);
2691 /* probably first extent we're gonna free will be last in block */
2692 handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, depth + 1);
2693 if (IS_ERR(handle))
2694 return PTR_ERR(handle);
2696 again:
2697 trace_ext4_ext_remove_space(inode, start, end, depth);
2700 * Check if we are removing extents inside the extent tree. If that
2701 * is the case, we are going to punch a hole inside the extent tree
2702 * so we have to check whether we need to split the extent covering
2703 * the last block to remove so we can easily remove the part of it
2704 * in ext4_ext_rm_leaf().
2706 if (end < EXT_MAX_BLOCKS - 1) {
2707 struct ext4_extent *ex;
2708 ext4_lblk_t ee_block;
2710 /* find extent for this block */
2711 path = ext4_ext_find_extent(inode, end, NULL);
2712 if (IS_ERR(path)) {
2713 ext4_journal_stop(handle);
2714 return PTR_ERR(path);
2716 depth = ext_depth(inode);
2717 /* The leaf may not exist only if the inode has no blocks at all */
2718 ex = path[depth].p_ext;
2719 if (!ex) {
2720 if (depth) {
2721 EXT4_ERROR_INODE(inode,
2722 "path[%d].p_hdr == NULL",
2723 depth);
2724 err = -EIO;
2726 goto out;
2729 ee_block = le32_to_cpu(ex->ee_block);
2732 * See if the last block is inside the extent, if so split
2733 * the extent at 'end' block so we can easily remove the
2734 * tail of the first part of the split extent in
2735 * ext4_ext_rm_leaf().
2737 if (end >= ee_block &&
2738 end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
2739 int split_flag = 0;
2741 if (ext4_ext_is_uninitialized(ex))
2742 split_flag = EXT4_EXT_MARK_UNINIT1 |
2743 EXT4_EXT_MARK_UNINIT2;
2746 * Split the extent in two so that 'end' is the last
2747 * block in the first new extent. Also we should not
2748 * fail removing space due to ENOSPC so try to use
2749 * reserved block if that happens.
2751 err = ext4_split_extent_at(handle, inode, path,
2752 end + 1, split_flag,
2753 EXT4_GET_BLOCKS_PRE_IO |
2754 EXT4_GET_BLOCKS_METADATA_NOFAIL);
2756 if (err < 0)
2757 goto out;
2761 * We start scanning from the right side, freeing all the blocks
2762 * after i_size and walking into the tree depth-wise.
2764 depth = ext_depth(inode);
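/*
 * If we already hold a path from the split above, reuse it and
 * bump p_block past eh_entries at each intermediate level so
 * ext4_ext_more_to_rm() will descend through the whole path;
 * otherwise allocate a fresh path starting at the root.
 */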
2765 if (path) {
2766 int k = i = depth;
2767 while (--k > 0)
2768 path[k].p_block =
2769 le16_to_cpu(path[k].p_hdr->eh_entries)+1;
2770 } else {
2771 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
2772 GFP_NOFS);
2773 if (path == NULL) {
2774 ext4_journal_stop(handle);
2775 return -ENOMEM;
2777 path[0].p_depth = depth;
2778 path[0].p_hdr = ext_inode_hdr(inode);
2779 i = 0;
2781 if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2782 err = -EIO;
2783 goto out;
2786 err = 0;
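/*
 * Depth-first walk from the rightmost leaf towards the root:
 * i is the current level, i == depth means a leaf, smaller i
 * means an index block.
 */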
2788 while (i >= 0 && err == 0) {
2789 if (i == depth) {
2790 /* this is leaf block */
2791 err = ext4_ext_rm_leaf(handle, inode, path,
2792 &partial_cluster, start,
2793 end);
2794 /* root level has p_bh == NULL, brelse() eats this */
2795 brelse(path[i].p_bh);
2796 path[i].p_bh = NULL;
2797 i--;
2798 continue;
2801 /* this is index block */
2802 if (!path[i].p_hdr) {
2803 ext_debug("initialize header\n");
2804 path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2807 if (!path[i].p_idx) {
2808 /* this level hasn't been touched yet */
2809 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2810 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2811 ext_debug("init index ptr: hdr 0x%p, num %d\n",
2812 path[i].p_hdr,
2813 le16_to_cpu(path[i].p_hdr->eh_entries));
2814 } else {
2815 /* we were already here, move to the next index */
2816 path[i].p_idx--;
2819 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2820 i, EXT_FIRST_INDEX(path[i].p_hdr),
2821 path[i].p_idx);
2822 if (ext4_ext_more_to_rm(path + i)) {
2823 struct buffer_head *bh;
2824 /* go to the next level */
2825 ext_debug("move to level %d (block %llu)\n",
2826 i + 1, ext4_idx_pblock(path[i].p_idx));
2827 memset(path + i + 1, 0, sizeof(*path));
2828 bh = sb_bread(sb, ext4_idx_pblock(path[i].p_idx));
2829 if (!bh) {
2830 /* should we reset i_size? */
2831 err = -EIO;
2832 break;
2834 if (WARN_ON(i + 1 > depth)) {
2835 err = -EIO;
2836 break;
2838 if (ext4_ext_check_block(inode, ext_block_hdr(bh),
2839 depth - i - 1, bh)) {
2840 err = -EIO;
2841 break;
2843 path[i + 1].p_bh = bh;
2845 /* save actual number of indexes since this
2846 * number is changed at the next iteration */
2847 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2848 i++;
2849 } else {
2850 /* we finished processing this index, go up */
2851 if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2852 /* index is empty, remove it;
2853 * the handle must already be prepared by
2854 * ext4_ext_rm_leaf() */
2855 err = ext4_ext_rm_idx(handle, inode, path, i);
2857 /* root level has p_bh == NULL, brelse() eats this */
2858 brelse(path[i].p_bh);
2859 path[i].p_bh = NULL;
2860 i--;
2861 ext_debug("return to level %d\n", i);
2865 trace_ext4_ext_remove_space_done(inode, start, end, depth,
2866 partial_cluster, path->p_hdr->eh_entries);
2868 /* If we still have something in the partial cluster and we have removed
2869 * even the first extent, then we should free the blocks in the partial
2870 * cluster as well. */
2871 if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) {
2872 int flags = EXT4_FREE_BLOCKS_FORGET;
2874 if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
2875 flags |= EXT4_FREE_BLOCKS_METADATA;
2877 ext4_free_blocks(handle, inode, NULL,
2878 EXT4_C2B(EXT4_SB(sb), partial_cluster),
2879 EXT4_SB(sb)->s_cluster_ratio, flags);
2880 partial_cluster = 0;
2883 /* TODO: flexible tree reduction should be here */
2884 if (path->p_hdr->eh_entries == 0) {
2886 * truncate to zero freed the whole tree,
2887 * so we need to correct eh_depth
2889 err = ext4_ext_get_access(handle, inode, path);
2890 if (err == 0) {
2891 ext_inode_hdr(inode)->eh_depth = 0;
2892 ext_inode_hdr(inode)->eh_max =
2893 cpu_to_le16(ext4_ext_space_root(inode, 0));
2894 err = ext4_ext_dirty(handle, inode, path);
2897 out:
2898 ext4_ext_drop_refs(path);
2899 kfree(path);
2900 if (err == -EAGAIN) {
2901 path = NULL;
2902 goto again;
2904 ext4_journal_stop(handle);
2906 return err;
2910 * called at mount time
2912 void ext4_ext_init(struct super_block *sb)
2915 * possible initialization would be here
2918 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2919 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2920 printk(KERN_INFO "EXT4-fs: file extents enabled"
2921 #ifdef AGGRESSIVE_TEST
2922 ", aggressive tests"
2923 #endif
2924 #ifdef CHECK_BINSEARCH
2925 ", check binsearch"
2926 #endif
2927 #ifdef EXTENTS_STATS
2928 ", stats"
2929 #endif
2930 "\n");
2931 #endif
2932 #ifdef EXTENTS_STATS
2933 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2934 EXT4_SB(sb)->s_ext_min = 1 << 30;
2935 EXT4_SB(sb)->s_ext_max = 0;
2936 #endif
2941 * called at umount time
2943 void ext4_ext_release(struct super_block *sb)
2945 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2946 return;
2948 #ifdef EXTENTS_STATS
2949 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2950 struct ext4_sb_info *sbi = EXT4_SB(sb);
2951 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2952 sbi->s_ext_blocks, sbi->s_ext_extents,
2953 sbi->s_ext_blocks / sbi->s_ext_extents);
2954 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2955 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2957 #endif
2960 /* FIXME!! we need to try to merge to left or right after zero-out */
2961 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2963 ext4_fsblk_t ee_pblock;
2964 unsigned int ee_len;
2965 int ret;
2967 ee_len = ext4_ext_get_actual_len(ex);
2968 ee_pblock = ext4_ext_pblock(ex);
2970 ret = sb_issue_zeroout(inode->i_sb, ee_pblock, ee_len, GFP_NOFS);
2971 if (ret > 0)
2972 ret = 0;
2974 return ret;
2978 * ext4_split_extent_at() splits an extent at the given block.
2980 * @handle: the journal handle
2981 * @inode: the file inode
2982 * @path: the path to the extent
2983 * @split: the logical block where the extent is split.
2984 * @split_flag: indicates whether the extent can be zeroed out if the split
2985 * fails, and the states (init or uninit) of the new extents.
2986 * @flags: flags used to insert the new extent into the extent tree.
2989 * Splits extent [a, b] into two extents [a, @split) and [@split, b], the
2990 * states of which are determined by split_flag.
2992 * There are two cases:
2993 * a> the extent is split into two extents.
2994 * b> no split is needed, just mark the extent.
2996 * Return 0 on success.
2998 static int ext4_split_extent_at(handle_t *handle,
2999 struct inode *inode,
3000 struct ext4_ext_path *path,
3001 ext4_lblk_t split,
3002 int split_flag,
3003 int flags)
3005 ext4_fsblk_t newblock;
3006 ext4_lblk_t ee_block;
3007 struct ext4_extent *ex, newex, orig_ex, zero_ex;
3008 struct ext4_extent *ex2 = NULL;
3009 unsigned int ee_len, depth;
3010 int err = 0;
3012 BUG_ON((split_flag & (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2)) ==
3013 (EXT4_EXT_DATA_VALID1 | EXT4_EXT_DATA_VALID2));
3015 ext_debug("ext4_split_extent_at: inode %lu, logical "
3016 "block %llu\n", inode->i_ino, (unsigned long long)split);
3018 ext4_ext_show_leaf(inode, path);
3020 depth = ext_depth(inode);
3021 ex = path[depth].p_ext;
3022 ee_block = le32_to_cpu(ex->ee_block);
3023 ee_len = ext4_ext_get_actual_len(ex);
3024 newblock = split - ee_block + ext4_ext_pblock(ex);
3026 BUG_ON(split < ee_block || split >= (ee_block + ee_len));
3027 BUG_ON(!ext4_ext_is_uninitialized(ex) &&
3028 split_flag & (EXT4_EXT_MAY_ZEROOUT |
3029 EXT4_EXT_MARK_UNINIT1 |
3030 EXT4_EXT_MARK_UNINIT2));
3032 err = ext4_ext_get_access(handle, inode, path + depth);
3033 if (err)
3034 goto out;
3036 if (split == ee_block) {
3038 * case b: block @split is the block that the extent begins with
3039 * then we just change the state of the extent, and splitting
3040 * is not needed.
3042 if (split_flag & EXT4_EXT_MARK_UNINIT2)
3043 ext4_ext_mark_uninitialized(ex);
3044 else
3045 ext4_ext_mark_initialized(ex);
3047 if (!(flags & EXT4_GET_BLOCKS_PRE_IO))
3048 ext4_ext_try_to_merge(handle, inode, path, ex);
3050 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3051 goto out;
3054 /* case a */
3055 memcpy(&orig_ex, ex, sizeof(orig_ex));
3056 ex->ee_len = cpu_to_le16(split - ee_block);
3057 if (split_flag & EXT4_EXT_MARK_UNINIT1)
3058 ext4_ext_mark_uninitialized(ex);
3061 * path may lead to new leaf, not to original leaf any more
3062 * after ext4_ext_insert_extent() returns,
3064 err = ext4_ext_dirty(handle, inode, path + depth);
3065 if (err)
3066 goto fix_extent_len;
3068 ex2 = &newex;
3069 ex2->ee_block = cpu_to_le32(split);
3070 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
3071 ext4_ext_store_pblock(ex2, newblock);
3072 if (split_flag & EXT4_EXT_MARK_UNINIT2)
3073 ext4_ext_mark_uninitialized(ex2);
3075 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3076 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3077 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3078 if (split_flag & EXT4_EXT_DATA_VALID1) {
3079 err = ext4_ext_zeroout(inode, ex2);
3080 zero_ex.ee_block = ex2->ee_block;
3081 zero_ex.ee_len = cpu_to_le16(
3082 ext4_ext_get_actual_len(ex2));
3083 ext4_ext_store_pblock(&zero_ex,
3084 ext4_ext_pblock(ex2));
3085 } else {
3086 err = ext4_ext_zeroout(inode, ex);
3087 zero_ex.ee_block = ex->ee_block;
3088 zero_ex.ee_len = cpu_to_le16(
3089 ext4_ext_get_actual_len(ex));
3090 ext4_ext_store_pblock(&zero_ex,
3091 ext4_ext_pblock(ex));
3093 } else {
3094 err = ext4_ext_zeroout(inode, &orig_ex);
3095 zero_ex.ee_block = orig_ex.ee_block;
3096 zero_ex.ee_len = cpu_to_le16(
3097 ext4_ext_get_actual_len(&orig_ex));
3098 ext4_ext_store_pblock(&zero_ex,
3099 ext4_ext_pblock(&orig_ex));
3102 if (err)
3103 goto fix_extent_len;
3104 /* update the extent length and mark as initialized */
3105 ex->ee_len = cpu_to_le16(ee_len);
3106 ext4_ext_try_to_merge(handle, inode, path, ex);
3107 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3108 if (err)
3109 goto fix_extent_len;
3111 /* update extent status tree */
3112 err = ext4_es_zeroout(inode, &zero_ex);
3114 goto out;
3115 } else if (err)
3116 goto fix_extent_len;
3118 out:
3119 ext4_ext_show_leaf(inode, path);
3120 return err;
3122 fix_extent_len:
3123 ex->ee_len = orig_ex.ee_len;
3124 ext4_ext_dirty(handle, inode, path + depth);
3125 return err;
3129 * ext4_split_extent() splits an extent and marks the extent which is covered
3130 * by @map as split_flag indicates
3132 * It may result in splitting the extent into multiple extents (up to three)
3133 * There are three possibilities:
3134 * a> There is no split required
3135 * b> Splits into two extents: Split is happening at either end of the extent
3136 * c> Splits into three extents: Someone is splitting in the middle of the extent
3139 static int ext4_split_extent(handle_t *handle,
3140 struct inode *inode,
3141 struct ext4_ext_path *path,
3142 struct ext4_map_blocks *map,
3143 int split_flag,
3144 int flags)
3146 ext4_lblk_t ee_block;
3147 struct ext4_extent *ex;
3148 unsigned int ee_len, depth;
3149 int err = 0;
3150 int uninitialized;
3151 int split_flag1, flags1;
3152 int allocated = map->m_len;
3154 depth = ext_depth(inode);
3155 ex = path[depth].p_ext;
3156 ee_block = le32_to_cpu(ex->ee_block);
3157 ee_len = ext4_ext_get_actual_len(ex);
3158 uninitialized = ext4_ext_is_uninitialized(ex);
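/*
 * First split at the right boundary (m_lblk + m_len) if it falls
 * inside the extent; the left boundary at m_lblk is handled below
 * on a freshly looked-up path.
 */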
3160 if (map->m_lblk + map->m_len < ee_block + ee_len) {
3161 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
3162 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
3163 if (uninitialized)
3164 split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
3165 EXT4_EXT_MARK_UNINIT2;
3166 if (split_flag & EXT4_EXT_DATA_VALID2)
3167 split_flag1 |= EXT4_EXT_DATA_VALID1;
3168 err = ext4_split_extent_at(handle, inode, path,
3169 map->m_lblk + map->m_len, split_flag1, flags1);
3170 if (err)
3171 goto out;
3172 } else {
3173 allocated = ee_len - (map->m_lblk - ee_block);
3176 * An updated path is required because the previous ext4_split_extent_at()
3177 * may result in a split of the original leaf or an extent zeroout.
3179 ext4_ext_drop_refs(path);
3180 path = ext4_ext_find_extent(inode, map->m_lblk, path);
3181 if (IS_ERR(path))
3182 return PTR_ERR(path);
3183 depth = ext_depth(inode);
3184 ex = path[depth].p_ext;
3185 uninitialized = ext4_ext_is_uninitialized(ex);
3186 split_flag1 = 0;
3188 if (map->m_lblk >= ee_block) {
3189 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
3190 if (uninitialized) {
3191 split_flag1 |= EXT4_EXT_MARK_UNINIT1;
3192 split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3193 EXT4_EXT_MARK_UNINIT2);
3195 err = ext4_split_extent_at(handle, inode, path,
3196 map->m_lblk, split_flag1, flags);
3197 if (err)
3198 goto out;
3201 ext4_ext_show_leaf(inode, path);
3202 out:
3203 return err ? err : allocated;
3207 * This function is called by ext4_ext_map_blocks() if someone tries to write
3208 * to an uninitialized extent. It may result in splitting the uninitialized
3209 * extent into multiple extents (up to three - one initialized and two
3210 * uninitialized).
3211 * There are three possibilities:
3212 * a> There is no split required: Entire extent should be initialized
3213 * b> Splits into two extents: Write is happening at either end of the extent
3214 * c> Splits into three extents: Someone is writing in the middle of the extent
3216 * Pre-conditions:
3217 * - The extent pointed to by 'path' is uninitialized.
3218 * - The extent pointed to by 'path' contains a superset
3219 * of the logical span [map->m_lblk, map->m_lblk + map->m_len).
3221 * Post-conditions on success:
3222 * - the returned value is the number of blocks beyond map->m_lblk
3223 * that are allocated and initialized.
3224 * It is guaranteed to be >= map->m_len.
3226 static int ext4_ext_convert_to_initialized(handle_t *handle,
3227 struct inode *inode,
3228 struct ext4_map_blocks *map,
3229 struct ext4_ext_path *path,
3230 int flags)
3232 struct ext4_sb_info *sbi;
3233 struct ext4_extent_header *eh;
3234 struct ext4_map_blocks split_map;
3235 struct ext4_extent zero_ex;
3236 struct ext4_extent *ex, *abut_ex;
3237 ext4_lblk_t ee_block, eof_block;
3238 unsigned int ee_len, depth, map_len = map->m_len;
3239 int allocated = 0, max_zeroout = 0;
3240 int err = 0;
3241 int split_flag = 0;
3243 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
3244 "block %llu, max_blocks %u\n", inode->i_ino,
3245 (unsigned long long)map->m_lblk, map_len);
3247 sbi = EXT4_SB(inode->i_sb);
3248 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3249 inode->i_sb->s_blocksize_bits;
3250 if (eof_block < map->m_lblk + map_len)
3251 eof_block = map->m_lblk + map_len;
3253 depth = ext_depth(inode);
3254 eh = path[depth].p_hdr;
3255 ex = path[depth].p_ext;
3256 ee_block = le32_to_cpu(ex->ee_block);
3257 ee_len = ext4_ext_get_actual_len(ex);
3258 zero_ex.ee_len = 0;
3260 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
3262 /* Pre-conditions */
3263 BUG_ON(!ext4_ext_is_uninitialized(ex));
3264 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
3267 * Attempt to transfer newly initialized blocks from the currently
3268 * uninitialized extent to its neighbor. This is much cheaper
3269 * than an insertion followed by a merge as those involve costly
3270 * memmove() calls. Transferring to the left is the common case in
3271 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
3272 * followed by append writes.
3274 * Limitations of the current logic:
3275 * - L1: we do not deal with writes covering the whole extent.
3276 * This would require removing the extent if the transfer
3277 * is possible.
3278 * - L2: we only attempt to merge with an extent stored in the
3279 * same extent tree node.
3281 if ((map->m_lblk == ee_block) &&
3282 /* See if we can merge left */
3283 (map_len < ee_len) && /*L1*/
3284 (ex > EXT_FIRST_EXTENT(eh))) { /*L2*/
3285 ext4_lblk_t prev_lblk;
3286 ext4_fsblk_t prev_pblk, ee_pblk;
3287 unsigned int prev_len;
3289 abut_ex = ex - 1;
3290 prev_lblk = le32_to_cpu(abut_ex->ee_block);
3291 prev_len = ext4_ext_get_actual_len(abut_ex);
3292 prev_pblk = ext4_ext_pblock(abut_ex);
3293 ee_pblk = ext4_ext_pblock(ex);
3296 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3297 * upon those conditions:
3298 * - C1: abut_ex is initialized,
3299 * - C2: abut_ex is logically abutting ex,
3300 * - C3: abut_ex is physically abutting ex,
3301 * - C4: abut_ex can receive the additional blocks without
3302 * overflowing the (initialized) length limit.
3304 if ((!ext4_ext_is_uninitialized(abut_ex)) && /*C1*/
3305 ((prev_lblk + prev_len) == ee_block) && /*C2*/
3306 ((prev_pblk + prev_len) == ee_pblk) && /*C3*/
3307 (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
3308 err = ext4_ext_get_access(handle, inode, path + depth);
3309 if (err)
3310 goto out;
3312 trace_ext4_ext_convert_to_initialized_fastpath(inode,
3313 map, ex, abut_ex);
3315 /* Shift the start of ex by 'map_len' blocks */
3316 ex->ee_block = cpu_to_le32(ee_block + map_len);
3317 ext4_ext_store_pblock(ex, ee_pblk + map_len);
3318 ex->ee_len = cpu_to_le16(ee_len - map_len);
3319 ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3321 /* Extend abut_ex by 'map_len' blocks */
3322 abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
3324 /* Result: number of initialized blocks past m_lblk */
3325 allocated = map_len;
3327 } else if (((map->m_lblk + map_len) == (ee_block + ee_len)) &&
3328 (map_len < ee_len) && /*L1*/
3329 ex < EXT_LAST_EXTENT(eh)) { /*L2*/
3330 /* See if we can merge right */
3331 ext4_lblk_t next_lblk;
3332 ext4_fsblk_t next_pblk, ee_pblk;
3333 unsigned int next_len;
3335 abut_ex = ex + 1;
3336 next_lblk = le32_to_cpu(abut_ex->ee_block);
3337 next_len = ext4_ext_get_actual_len(abut_ex);
3338 next_pblk = ext4_ext_pblock(abut_ex);
3339 ee_pblk = ext4_ext_pblock(ex);
3342 * A transfer of blocks from 'ex' to 'abut_ex' is allowed
3343 * upon those conditions:
3344 * - C1: abut_ex is initialized,
3345 * - C2: abut_ex is logically abutting ex,
3346 * - C3: abut_ex is physically abutting ex,
3347 * - C4: abut_ex can receive the additional blocks without
3348 * overflowing the (initialized) length limit.
3350 if ((!ext4_ext_is_uninitialized(abut_ex)) && /*C1*/
3351 ((map->m_lblk + map_len) == next_lblk) && /*C2*/
3352 ((ee_pblk + ee_len) == next_pblk) && /*C3*/
3353 (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
3354 err = ext4_ext_get_access(handle, inode, path + depth);
3355 if (err)
3356 goto out;
3358 trace_ext4_ext_convert_to_initialized_fastpath(inode,
3359 map, ex, abut_ex);
3361 /* Shift the start of abut_ex by 'map_len' blocks */
3362 abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
3363 ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
3364 ex->ee_len = cpu_to_le16(ee_len - map_len);
3365 ext4_ext_mark_uninitialized(ex); /* Restore the flag */
3367 /* Extend abut_ex by 'map_len' blocks */
3368 abut_ex->ee_len = cpu_to_le16(next_len + map_len);
3370 /* Result: number of initialized blocks past m_lblk */
3371 allocated = map_len;
3374 if (allocated) {
3375 /* Mark the block containing both extents as dirty */
3376 ext4_ext_dirty(handle, inode, path + depth);
3378 /* Update path to point to the right extent */
3379 path[depth].p_ext = abut_ex;
3380 goto out;
3381 } else
3382 allocated = ee_len - (map->m_lblk - ee_block);
3384 WARN_ON(map->m_lblk < ee_block);
3386 * It is safe to convert the extent to initialized via explicit
3387 * zeroout only if the extent is fully inside i_size or new_size.
3389 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3391 if (EXT4_EXT_MAY_ZEROOUT & split_flag)
3392 max_zeroout = sbi->s_extent_max_zeroout_kb >>
3393 (inode->i_sb->s_blocksize_bits - 10);
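/* s_extent_max_zeroout_kb is in KiB; the shift converts it to blocks. */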
3395 /* If the extent is smaller than s_extent_max_zeroout_kb, zero it out directly */
3396 if (max_zeroout && (ee_len <= max_zeroout)) {
3397 err = ext4_ext_zeroout(inode, ex);
3398 if (err)
3399 goto out;
3400 zero_ex.ee_block = ex->ee_block;
3401 zero_ex.ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex));
3402 ext4_ext_store_pblock(&zero_ex, ext4_ext_pblock(ex));
3404 err = ext4_ext_get_access(handle, inode, path + depth);
3405 if (err)
3406 goto out;
3407 ext4_ext_mark_initialized(ex);
3408 ext4_ext_try_to_merge(handle, inode, path, ex);
3409 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3410 goto out;
3414 * four cases:
3415 * 1. split the extent into three extents.
3416 * 2. split the extent into two extents, zeroout the first half.
3417 * 3. split the extent into two extents, zeroout the second half.
3418 * 4. split the extent into two extents without zeroout.
3420 split_map.m_lblk = map->m_lblk;
3421 split_map.m_len = map->m_len;
3423 if (max_zeroout && (allocated > map->m_len)) {
3424 if (allocated <= max_zeroout) {
3425 /* case 3 */
3426 zero_ex.ee_block =
3427 cpu_to_le32(map->m_lblk);
3428 zero_ex.ee_len = cpu_to_le16(allocated);
3429 ext4_ext_store_pblock(&zero_ex,
3430 ext4_ext_pblock(ex) + map->m_lblk - ee_block);
3431 err = ext4_ext_zeroout(inode, &zero_ex);
3432 if (err)
3433 goto out;
3434 split_map.m_lblk = map->m_lblk;
3435 split_map.m_len = allocated;
3436 } else if (map->m_lblk - ee_block + map->m_len < max_zeroout) {
3437 /* case 2 */
3438 if (map->m_lblk != ee_block) {
3439 zero_ex.ee_block = ex->ee_block;
3440 zero_ex.ee_len = cpu_to_le16(map->m_lblk -
3441 ee_block);
3442 ext4_ext_store_pblock(&zero_ex,
3443 ext4_ext_pblock(ex));
3444 err = ext4_ext_zeroout(inode, &zero_ex);
3445 if (err)
3446 goto out;
3449 split_map.m_lblk = ee_block;
3450 split_map.m_len = map->m_lblk - ee_block + map->m_len;
3451 allocated = map->m_len;
3455 allocated = ext4_split_extent(handle, inode, path,
3456 &split_map, split_flag, flags);
3457 if (allocated < 0)
3458 err = allocated;
3460 out:
3461 /* If we have gotten a failure, don't zero out status tree */
3462 if (!err)
3463 err = ext4_es_zeroout(inode, &zero_ex);
3464 return err ? err : allocated;
3468 * This function is called by ext4_ext_map_blocks() from
3469 * ext4_get_blocks_dio_write() when a DIO write targets
3470 * an uninitialized extent.
3472 * Writing to an uninitialized extent may result in splitting the uninitialized
3473 * extent into multiple initialized/uninitialized extents (up to three).
3474 * There are three possibilities:
3475 * a> There is no split required: Entire extent should be uninitialized
3476 * b> Splits into two extents: Write is happening at either end of the extent
3477 * c> Splits into three extents: Someone is writing in the middle of the extent
3479 * One or more index blocks may be needed if the extent tree grows after
3480 * the uninitialized extent is split. To prevent ENOSPC from occurring at IO
3481 * completion, we need to split the uninitialized extent before submitting
3482 * the IO. The uninitialized extent handled at this time will be split
3483 * into three uninitialized extents (at most). After the IO completes, the part
3484 * being filled will be converted to initialized by the end_io callback
3485 * via ext4_convert_unwritten_extents().
3487 * Returns the size of the uninitialized extent to be written on success.
3489 static int ext4_split_unwritten_extents(handle_t *handle,
3490 struct inode *inode,
3491 struct ext4_map_blocks *map,
3492 struct ext4_ext_path *path,
3493 int flags)
3495 ext4_lblk_t eof_block;
3496 ext4_lblk_t ee_block;
3497 struct ext4_extent *ex;
3498 unsigned int ee_len;
3499 int split_flag = 0, depth;
3501 ext_debug("ext4_split_unwritten_extents: inode %lu, logical "
3502 "block %llu, max_blocks %u\n", inode->i_ino,
3503 (unsigned long long)map->m_lblk, map->m_len);
3505 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
3506 inode->i_sb->s_blocksize_bits;
3507 if (eof_block < map->m_lblk + map->m_len)
3508 eof_block = map->m_lblk + map->m_len;
3510 * It is safe to convert the extent to initialized via explicit
3511 * zeroout only if the extent is fully inside i_size or new_size.
3513 depth = ext_depth(inode);
3514 ex = path[depth].p_ext;
3515 ee_block = le32_to_cpu(ex->ee_block);
3516 ee_len = ext4_ext_get_actual_len(ex);
3518 split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
3519 split_flag |= EXT4_EXT_MARK_UNINIT2;
3520 if (flags & EXT4_GET_BLOCKS_CONVERT)
3521 split_flag |= EXT4_EXT_DATA_VALID2;
3522 flags |= EXT4_GET_BLOCKS_PRE_IO;
3523 return ext4_split_extent(handle, inode, path, map, split_flag, flags);
3526 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3527 struct inode *inode,
3528 struct ext4_map_blocks *map,
3529 struct ext4_ext_path *path)
3531 struct ext4_extent *ex;
3532 ext4_lblk_t ee_block;
3533 unsigned int ee_len;
3534 int depth;
3535 int err = 0;
3537 depth = ext_depth(inode);
3538 ex = path[depth].p_ext;
3539 ee_block = le32_to_cpu(ex->ee_block);
3540 ee_len = ext4_ext_get_actual_len(ex);
3542 ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical "
3543 "block %llu, max_blocks %u\n", inode->i_ino,
3544 (unsigned long long)ee_block, ee_len);
3546 /* If the extent is larger than requested, it is a clear sign that we still
3547 * have some extent state machine issues left. So a split is still
3548 * required.
3549 * TODO: Once all related issues are fixed, this situation should be
3550 * illegal.
3552 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3553 #ifdef EXT4_DEBUG
3554 ext4_warning("Inode (%ld) finished: extent logical block %llu,"
3555 " len %u; IO logical block %llu, len %u\n",
3556 inode->i_ino, (unsigned long long)ee_block, ee_len,
3557 (unsigned long long)map->m_lblk, map->m_len);
3558 #endif
3559 err = ext4_split_unwritten_extents(handle, inode, map, path,
3560 EXT4_GET_BLOCKS_CONVERT);
3561 if (err < 0)
3562 goto out;
3563 ext4_ext_drop_refs(path);
3564 path = ext4_ext_find_extent(inode, map->m_lblk, path);
3565 if (IS_ERR(path)) {
3566 err = PTR_ERR(path);
3567 goto out;
3569 depth = ext_depth(inode);
3570 ex = path[depth].p_ext;
3573 err = ext4_ext_get_access(handle, inode, path + depth);
3574 if (err)
3575 goto out;
3576 /* first mark the extent as initialized */
3577 ext4_ext_mark_initialized(ex);
3579 /* note: ext4_ext_correct_indexes() isn't needed here because
3580 * borders are not changed
3582 ext4_ext_try_to_merge(handle, inode, path, ex);
3584 /* Mark modified extent as dirty */
3585 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3586 out:
3587 ext4_ext_show_leaf(inode, path);
3588 return err;
3591 static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3592 sector_t block, int count)
3594 int i;
3595 for (i = 0; i < count; i++)
3596 unmap_underlying_metadata(bdev, block + i);
3600 * Handle EOFBLOCKS_FL flag, clearing it if necessary
3602 static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3603 ext4_lblk_t lblk,
3604 struct ext4_ext_path *path,
3605 unsigned int len)
3607 int i, depth;
3608 struct ext4_extent_header *eh;
3609 struct ext4_extent *last_ex;
3611 if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
3612 return 0;
3614 depth = ext_depth(inode);
3615 eh = path[depth].p_hdr;
3618 * We're going to remove EOFBLOCKS_FL entirely in the future so we
3619 * do not care about this case anymore. Simply remove the flag
3620 * if there are no extents.
3622 if (unlikely(!eh->eh_entries))
3623 goto out;
3624 last_ex = EXT_LAST_EXTENT(eh);
3626 * We should clear the EOFBLOCKS_FL flag if we are writing the
3627 * last block in the last extent in the file. We test this by
3628 * first checking to see if the caller to
3629 * ext4_ext_get_blocks() was interested in the last block (or
3630 * a block beyond the last block) in the current extent. If
3631 * this turns out to be false, we can bail out from this
3632 * function immediately.
3634 if (lblk + len < le32_to_cpu(last_ex->ee_block) +
3635 ext4_ext_get_actual_len(last_ex))
3636 return 0;
3638 * If the caller does appear to be planning to write at or
3639 * beyond the end of the current extent, we then test to see
3640 * if the current extent is the last extent in the file, by
3641 * checking to make sure it was reached via the rightmost node
3642 * at each level of the tree.
3644 for (i = depth-1; i >= 0; i--)
3645 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3646 return 0;
3647 out:
3648 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3649 return ext4_mark_inode_dirty(handle, inode);
3653 * ext4_find_delalloc_range: find delayed allocated block in the given range.
3655 * Return 1 if there is a delalloc block in the range, otherwise 0.
3657 int ext4_find_delalloc_range(struct inode *inode,
3658 ext4_lblk_t lblk_start,
3659 ext4_lblk_t lblk_end)
3661 struct extent_status es;
3663 ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
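/*
 * es now describes the first delayed extent found at or after
 * lblk_start (es_len == 0 means none); report whether it
 * overlaps [lblk_start, lblk_end].
 */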
3664 if (es.es_len == 0)
3665 return 0; /* there is no delayed extent in this tree */
3666 else if (es.es_lblk <= lblk_start &&
3667 lblk_start < es.es_lblk + es.es_len)
3668 return 1;
3669 else if (lblk_start <= es.es_lblk && es.es_lblk <= lblk_end)
3670 return 1;
3671 else
3672 return 0;
3675 int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
3677 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3678 ext4_lblk_t lblk_start, lblk_end;
3679 lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
3680 lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
3682 return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
3686 * Determines how many complete clusters (out of those specified by the 'map')
3687 * are under delalloc and had quota reserved for them.
3688 * This function is called when we are writing out the blocks that were
3689 * originally written with their allocation delayed, but then the space was
3690 * allocated using fallocate() before the delayed allocation could be resolved.
3691 * The cases to look for are:
3692 * ('=' indicates delayed allocated blocks
3693 * '-' indicates non-delayed allocated blocks)
3694 * (a) partial clusters towards beginning and/or end outside of allocated range
3695 * are not delalloc'ed.
3696 * Ex:
3697 * |----c---=|====c====|====c====|===-c----|
3698 * |++++++ allocated ++++++|
3699 * ==> 4 complete clusters in above example
3701 * (b) partial cluster (outside of allocated range) towards either end is
3702 * marked for delayed allocation. In this case, we will exclude that
3703 * cluster.
3704 * Ex:
3705 * |----====c========|========c========|
3706 * |++++++ allocated ++++++|
3707 * ==> 1 complete cluster in above example
3709 * Ex:
3710 * |================c================|
3711 * |++++++ allocated ++++++|
3712 * ==> 0 complete clusters in above example
3714 * The ext4_da_update_reserve_space will be called only if we
3715 * determine here that there were some "entire" clusters that span
3716 * this 'allocated' range.
3717 * In the non-bigalloc case, this function will just end up returning num_blks
3718 * without ever calling ext4_find_delalloc_range.
3720 static unsigned int
3721 get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3722 unsigned int num_blks)
3724 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3725 ext4_lblk_t alloc_cluster_start, alloc_cluster_end;
3726 ext4_lblk_t lblk_from, lblk_to, c_offset;
3727 unsigned int allocated_clusters = 0;
3729 alloc_cluster_start = EXT4_B2C(sbi, lblk_start);
3730 alloc_cluster_end = EXT4_B2C(sbi, lblk_start + num_blks - 1);
3732 /* max possible clusters for this allocation */
3733 allocated_clusters = alloc_cluster_end - alloc_cluster_start + 1;
3735 trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
3737 /* Check towards left side */
3738 c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
3739 if (c_offset) {
3740 lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
3741 lblk_to = lblk_from + c_offset - 1;
3743 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
3744 allocated_clusters--;
3747 /* Now check towards right. */
3748 c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
3749 if (allocated_clusters && c_offset) {
3750 lblk_from = lblk_start + num_blks;
3751 lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
3753 if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
3754 allocated_clusters--;
3757 return allocated_clusters;
3760 static int
3761 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3762 struct ext4_map_blocks *map,
3763 struct ext4_ext_path *path, int flags,
3764 unsigned int allocated, ext4_fsblk_t newblock)
3766 int ret = 0;
3767 int err = 0;
3768 ext4_io_end_t *io = ext4_inode_aio(inode);
3770 ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
3771 "block %llu, max_blocks %u, flags %x, allocated %u\n",
3772 inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
3773 flags, allocated);
3774 ext4_ext_show_leaf(inode, path);
3777 * When writing into uninitialized space, we should not fail to
3778 * allocate metadata blocks for the new extent block if needed.
3780 flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
3782 trace_ext4_ext_handle_uninitialized_extents(inode, map, flags,
3783 allocated, newblock);
3785 /* get_block() before submitting the IO, split the extent */
3786 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
3787 ret = ext4_split_unwritten_extents(handle, inode, map,
3788 path, flags);
3789 if (ret <= 0)
3790 goto out;
3792 * Flag the inode (non-aio case) or end_io struct (aio case)
3793 * that this IO needs conversion to written when the IO is
3794 * completed
3796 if (io)
3797 ext4_set_io_unwritten_flag(inode, io);
3798 else
3799 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3800 map->m_flags |= EXT4_MAP_UNWRITTEN;
3801 if (ext4_should_dioread_nolock(inode))
3802 map->m_flags |= EXT4_MAP_UNINIT;
3803 goto out;
3805 /* IO end_io complete, convert the filled extent to written */
3806 if ((flags & EXT4_GET_BLOCKS_CONVERT)) {
3807 ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
3808 path);
3809 if (ret >= 0) {
3810 ext4_update_inode_fsync_trans(handle, inode, 1);
3811 err = check_eofblocks_fl(handle, inode, map->m_lblk,
3812 path, map->m_len);
3813 } else
3814 err = ret;
3815 map->m_flags |= EXT4_MAP_MAPPED;
3816 if (allocated > map->m_len)
3817 allocated = map->m_len;
3818 map->m_len = allocated;
3819 goto out2;
3821 /* buffered IO case */
3823 * repeat fallocate creation request
3824 * we already have an unwritten extent
3826 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
3827 map->m_flags |= EXT4_MAP_UNWRITTEN;
3828 goto map_out;
3831 /* buffered READ or buffered write_begin() lookup */
3832 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3834 * We have blocks reserved already. We
3835 * return allocated blocks so that delalloc
3836 * won't do block reservation for us. But
3837 * the buffer head will be unmapped so that
3838 * a read from the block returns 0s.
3840 map->m_flags |= EXT4_MAP_UNWRITTEN;
3841 goto out1;
3844 /* buffered write, writepage time, convert */
3845 ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags);
3846 if (ret >= 0)
3847 ext4_update_inode_fsync_trans(handle, inode, 1);
3848 out:
3849 if (ret <= 0) {
3850 err = ret;
3851 goto out2;
3852 } else
3853 allocated = ret;
3854 map->m_flags |= EXT4_MAP_NEW;
3856 * if we allocated more blocks than requested
3857 * we need to make sure we unmap the extra blocks
3858 * allocated. The actually needed blocks will get
3859 * unmapped later when we find the buffer_head marked
3860 * new.
3862 if (allocated > map->m_len) {
3863 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3864 newblock + map->m_len,
3865 allocated - map->m_len);
3866 allocated = map->m_len;
3868 map->m_len = allocated;
3871 * If we have done fallocate at an offset that is already
3872 * delayed allocated, we would have block reservation
3873 * and quota reservation done in the delayed write path.
3874 * But fallocate would have already updated quota and block
3875 * count for this offset. So cancel these reservations
3877 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
3878 unsigned int reserved_clusters;
3879 reserved_clusters = get_reserved_cluster_alloc(inode,
3880 map->m_lblk, map->m_len);
3881 if (reserved_clusters)
3882 ext4_da_update_reserve_space(inode,
3883 reserved_clusters,
3884 0);
3887 map_out:
3888 map->m_flags |= EXT4_MAP_MAPPED;
3889 if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0) {
3890 err = check_eofblocks_fl(handle, inode, map->m_lblk, path,
3891 map->m_len);
3892 if (err < 0)
3893 goto out2;
3895 out1:
3896 if (allocated > map->m_len)
3897 allocated = map->m_len;
3898 ext4_ext_show_leaf(inode, path);
3899 map->m_pblk = newblock;
3900 map->m_len = allocated;
3901 out2:
3902 if (path) {
3903 ext4_ext_drop_refs(path);
3904 kfree(path);
3906 return err ? err : allocated;
3910 * get_implied_cluster_alloc - check to see if the requested
3911 * allocation (in the map structure) overlaps with a cluster already
3912 * allocated in an extent.
3913 * @sb The filesystem superblock structure
3914 * @map The requested lblk->pblk mapping
3915 * @ex The extent structure which might contain an implied
3916 * cluster allocation
3918 * This function is called by ext4_ext_map_blocks() after we failed to
3919 * find blocks that were already in the inode's extent tree. Hence,
3920 * we know that the beginning of the requested region cannot overlap
3921 * the extent from the inode's extent tree. There are three cases we
3922 * want to catch. The first is this case:
3924 * |--- cluster # N--|
3925 * |--- extent ---| |---- requested region ---|
3926 * |==========|
3928 * The second case that we need to test for is this one:
3930 * |--------- cluster # N ----------------|
3931 * |--- requested region --| |------- extent ----|
3932 * |=======================|
3934 * The third case is when the requested region lies between two extents
3935 * within the same cluster:
3936 * |------------- cluster # N-------------|
3937 * |----- ex -----| |---- ex_right ----|
3938 * |------ requested region ------|
3939 * |================|
3941 * In each of the above cases, we need to set the map->m_pblk and
3942 * map->m_len so that they correspond to the extent labelled
3943 * "|====|" from cluster #N, since it is already in use for data in
3944 * cluster EXT4_B2C(sbi, map->m_lblk). We will then return 1 to
3945 * signal to ext4_ext_map_blocks() that map->m_pblk should be treated
3946 * as a new "allocated" block region. Otherwise, we will return 0 and
3947 * ext4_ext_map_blocks() will then allocate one or more new clusters
3948 * by calling ext4_mb_new_blocks().
3950 static int get_implied_cluster_alloc(struct super_block *sb,
3951 struct ext4_map_blocks *map,
3952 struct ext4_extent *ex,
3953 struct ext4_ext_path *path)
3955 struct ext4_sb_info *sbi = EXT4_SB(sb);
3956 ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
3957 ext4_lblk_t ex_cluster_start, ex_cluster_end;
3958 ext4_lblk_t rr_cluster_start;
3959 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3960 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
3961 unsigned short ee_len = ext4_ext_get_actual_len(ex);
3963 /* The extent passed in that we are trying to match */
3964 ex_cluster_start = EXT4_B2C(sbi, ee_block);
3965 ex_cluster_end = EXT4_B2C(sbi, ee_block + ee_len - 1);
3967 /* The requested region passed into ext4_map_blocks() */
3968 rr_cluster_start = EXT4_B2C(sbi, map->m_lblk);
3970 if ((rr_cluster_start == ex_cluster_end) ||
3971 (rr_cluster_start == ex_cluster_start)) {
3972 if (rr_cluster_start == ex_cluster_end)
3973 ee_start += ee_len - 1;
3974 map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
3975 c_offset;
3976 map->m_len = min(map->m_len,
3977 (unsigned) sbi->s_cluster_ratio - c_offset);
3979 * Check for and handle this case:
3981 * |--------- cluster # N-------------|
3982 * |------- extent ----|
3983 * |--- requested region ---|
3984 * |===========|
3987 if (map->m_lblk < ee_block)
3988 map->m_len = min(map->m_len, ee_block - map->m_lblk);
3991 * Check for the case where there is already another allocated
3992 * block to the right of 'ex' but before the end of the cluster.
3994 * |------------- cluster # N-------------|
3995 * |----- ex -----| |---- ex_right ----|
3996 * |------ requested region ------|
3997 * |================|
3999 if (map->m_lblk > ee_block) {
4000 ext4_lblk_t next = ext4_ext_next_allocated_block(path);
4001 map->m_len = min(map->m_len, next - map->m_lblk);
4004 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 1);
4005 return 1;
4008 trace_ext4_get_implied_cluster_alloc_exit(sb, map, 0);
4009 return 0;
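/*
 * Editor's illustration (not part of the original file): a minimal
 * sketch of the case-1 arithmetic above, assuming s_cluster_ratio is
 * a power of two (bigalloc guarantees this) and that physical blocks
 * keep their logical in-cluster offsets. All values are made up.
 */
#if 0
static void implied_cluster_example(void)
{
	unsigned int ratio = 16;			/* blocks per cluster */
	unsigned int lblk = 35;				/* requested block */
	unsigned int ee_block = 32, ee_len = 3;		/* extent [32..34] */
	unsigned long long ee_start = 992;		/* pblk of block 32 */

	unsigned int c_offset = lblk & (ratio - 1);	/* == 3 */
	/* lblk 35 and block 34 share cluster 2 (35 >> 4 == 34 >> 4),
	 * so rr_cluster_start == ex_cluster_end and case 1 applies */
	unsigned long long last_pblk = ee_start + ee_len - 1;	/* 994 */
	unsigned long long m_pblk =
		(last_pblk & ~(unsigned long long)(ratio - 1)) + c_offset;
	/* m_pblk == (994 & ~15) + 3 == 992 + 3 == 995 */
}
#endif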
4014 * Block allocation/map/preallocation routine for extents based files
4017 * Need to be called with
4018 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
4019 * (i.e., create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
4021 * return > 0, number of blocks already mapped/allocated
4022 * if create == 0 and these are pre-allocated blocks
4023 * buffer head is unmapped
4024 * otherwise blocks are mapped
4026 * return = 0, if plain lookup failed (blocks have not been allocated)
4027 * buffer head is unmapped
4029 * return < 0, error case.
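/*
 * Editor's illustration (not part of the original file): how a caller
 * might consume the return convention above for a pure lookup
 * (create == 0); inode, lblk and len are assumed to be in scope, and
 * the handle may be NULL in the lookup case.
 */
#if 0
	struct ext4_map_blocks map = {
		.m_lblk = lblk,		/* first logical block of interest */
		.m_len = len,		/* number of blocks wanted */
	};
	int ret;

	down_read(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_map_blocks(NULL, inode, &map, 0);
	up_read(&EXT4_I(inode)->i_data_sem);

	if (ret > 0) {
		/* 'ret' blocks are mapped starting at map.m_pblk */
	} else if (ret == 0) {
		/* hole: no blocks allocated at map.m_lblk */
	} else {
		/* ret is a negative errno, e.g. -EIO */
	}
#endif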
4031 int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4032 struct ext4_map_blocks *map, int flags)
4034 struct ext4_ext_path *path = NULL;
4035 struct ext4_extent newex, *ex, *ex2;
4036 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4037 ext4_fsblk_t newblock = 0;
4038 int free_on_err = 0, err = 0, depth;
4039 unsigned int allocated = 0, offset = 0;
4040 unsigned int allocated_clusters = 0;
4041 struct ext4_allocation_request ar;
4042 ext4_io_end_t *io = ext4_inode_aio(inode);
4043 ext4_lblk_t cluster_offset;
4044 int set_unwritten = 0;
4046 ext_debug("blocks %u/%u requested for inode %lu\n",
4047 map->m_lblk, map->m_len, inode->i_ino);
4048 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4050 /* find extent for this block */
4051 path = ext4_ext_find_extent(inode, map->m_lblk, NULL);
4052 if (IS_ERR(path)) {
4053 err = PTR_ERR(path);
4054 path = NULL;
4055 goto out2;
4058 depth = ext_depth(inode);
4061 * a consistent leaf must not be empty;
4062 * this situation is possible, though, _during_ tree modification;
4063 * this is why the assert can't be put in ext4_ext_find_extent()
4065 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4066 EXT4_ERROR_INODE(inode, "bad extent address "
4067 "lblock: %lu, depth: %d pblock %lld",
4068 (unsigned long) map->m_lblk, depth,
4069 path[depth].p_block);
4070 err = -EIO;
4071 goto out2;
4074 ex = path[depth].p_ext;
4075 if (ex) {
4076 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
4077 ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
4078 unsigned short ee_len;
4081 * Uninitialized extents are treated as holes, except that
4082 * we split out initialized portions during a write.
4084 ee_len = ext4_ext_get_actual_len(ex);
4086 trace_ext4_ext_show_extent(inode, ee_block, ee_start, ee_len);
4088 /* if found extent covers block, simply return it */
4089 if (in_range(map->m_lblk, ee_block, ee_len)) {
4090 newblock = map->m_lblk - ee_block + ee_start;
4091 /* number of remaining blocks in the extent */
4092 allocated = ee_len - (map->m_lblk - ee_block);
4093 ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
4094 ee_block, ee_len, newblock);
4096 if (!ext4_ext_is_uninitialized(ex))
4097 goto out;
4099 allocated = ext4_ext_handle_uninitialized_extents(
4100 handle, inode, map, path, flags,
4101 allocated, newblock);
4102 goto out3;
4106 if ((sbi->s_cluster_ratio > 1) &&
4107 ext4_find_delalloc_cluster(inode, map->m_lblk))
4108 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4111 * requested block isn't allocated yet;
4112 * we can't try to create blocks if the create flag is zero
4114 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
4116 * put the just-found gap into the cache to speed up
4117 * subsequent requests
4119 if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0)
4120 ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
4121 goto out2;
4125 * Okay, we need to do block allocation.
4127 map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
4128 newex.ee_block = cpu_to_le32(map->m_lblk);
4129 cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
4132 * If we are doing bigalloc, check to see if the extent returned
4133 * by ext4_ext_find_extent() implies a cluster we can use.
4135 if (cluster_offset && ex &&
4136 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4137 ar.len = allocated = map->m_len;
4138 newblock = map->m_pblk;
4139 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4140 goto got_allocated_blocks;
4143 /* find neighbour allocated blocks */
4144 ar.lleft = map->m_lblk;
4145 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
4146 if (err)
4147 goto out2;
4148 ar.lright = map->m_lblk;
4149 ex2 = NULL;
4150 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright, &ex2);
4151 if (err)
4152 goto out2;
4154 /* Check if the extent after searching to the right implies a
4155 * cluster we can use. */
4156 if ((sbi->s_cluster_ratio > 1) && ex2 &&
4157 get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
4158 ar.len = allocated = map->m_len;
4159 newblock = map->m_pblk;
4160 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4161 goto got_allocated_blocks;
4165 * See if the request is beyond the maximum number of blocks we can have in
4166 * a single extent. For an initialized extent this limit is
4167 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
4168 * EXT_UNINIT_MAX_LEN.
4170 if (map->m_len > EXT_INIT_MAX_LEN &&
4171 !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4172 map->m_len = EXT_INIT_MAX_LEN;
4173 else if (map->m_len > EXT_UNINIT_MAX_LEN &&
4174 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
4175 map->m_len = EXT_UNINIT_MAX_LEN;
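/*
 * Editor's note: with 4 KiB blocks these limits cap a single extent at
 * 128 MiB; EXT_UNINIT_MAX_LEN is one block shorter (32767 vs 32768)
 * because the high bit of ee_len marks an extent as uninitialized.
 */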
4177 /* Check if we can really insert the (m_lblk)::(m_lblk + m_len) extent */
4178 newex.ee_len = cpu_to_le16(map->m_len);
4179 err = ext4_ext_check_overlap(sbi, inode, &newex, path);
4180 if (err)
4181 allocated = ext4_ext_get_actual_len(&newex);
4182 else
4183 allocated = map->m_len;
4185 /* allocate new block */
4186 ar.inode = inode;
4187 ar.goal = ext4_ext_find_goal(inode, path, map->m_lblk);
4188 ar.logical = map->m_lblk;
4190 * We calculate the offset from the beginning of the cluster
4191 * for the logical block number, since when we allocate a
4192 * physical cluster, the physical block should start at the
4193 * same offset from the beginning of the cluster. This is
4194 * needed so that future calls to get_implied_cluster_alloc()
4195 * work correctly.
4197 offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
4198 ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
4199 ar.goal -= offset;
4200 ar.logical -= offset;
4201 if (S_ISREG(inode->i_mode))
4202 ar.flags = EXT4_MB_HINT_DATA;
4203 else
4204 /* disable in-core preallocation for non-regular files */
4205 ar.flags = 0;
4206 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4207 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4208 newblock = ext4_mb_new_blocks(handle, &ar, &err);
4209 if (!newblock)
4210 goto out2;
4211 ext_debug("allocate new block: goal %llu, found %llu/%u\n",
4212 ar.goal, newblock, allocated);
4213 free_on_err = 1;
4214 allocated_clusters = ar.len;
4215 ar.len = EXT4_C2B(sbi, ar.len) - offset;
4216 if (ar.len > allocated)
4217 ar.len = allocated;
4219 got_allocated_blocks:
4220 /* try to insert new extent into found leaf and return */
4221 ext4_ext_store_pblock(&newex, newblock + offset);
4222 newex.ee_len = cpu_to_le16(ar.len);
4223 /* Mark uninitialized */
4224 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
4225 ext4_ext_mark_uninitialized(&newex);
4226 map->m_flags |= EXT4_MAP_UNWRITTEN;
4228 * An io_end structure is created for every IO write to an
4229 * uninitialized extent. To avoid unnecessary conversion,
4230 * here we flag only the IO that really needs the conversion.
4231 * For the non-async direct IO case, flag the inode state
4232 * so that we perform the conversion when IO is done.
4234 if ((flags & EXT4_GET_BLOCKS_PRE_IO))
4235 set_unwritten = 1;
4236 if (ext4_should_dioread_nolock(inode))
4237 map->m_flags |= EXT4_MAP_UNINIT;
4240 err = 0;
4241 if ((flags & EXT4_GET_BLOCKS_KEEP_SIZE) == 0)
4242 err = check_eofblocks_fl(handle, inode, map->m_lblk,
4243 path, ar.len);
4244 if (!err)
4245 err = ext4_ext_insert_extent(handle, inode, path,
4246 &newex, flags);
4248 if (!err && set_unwritten) {
4249 if (io)
4250 ext4_set_io_unwritten_flag(inode, io);
4251 else
4252 ext4_set_inode_state(inode,
4253 EXT4_STATE_DIO_UNWRITTEN);
4256 if (err && free_on_err) {
4257 int fb_flags = flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE ?
4258 EXT4_FREE_BLOCKS_NO_QUOT_UPDATE : 0;
4259 /* free data blocks we just allocated */
4260 /* not a good idea to call discard here directly,
4261 * but otherwise we'd need to call it on every free() */
4262 ext4_discard_preallocations(inode);
4263 ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
4264 ext4_ext_get_actual_len(&newex), fb_flags);
4265 goto out2;
4268 /* the previous routine could have used the block we allocated */
4269 newblock = ext4_ext_pblock(&newex);
4270 allocated = ext4_ext_get_actual_len(&newex);
4271 if (allocated > map->m_len)
4272 allocated = map->m_len;
4273 map->m_flags |= EXT4_MAP_NEW;
4276 * Update reserved blocks/metadata blocks after successful
4277 * block allocation which had been deferred till now.
4279 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
4280 unsigned int reserved_clusters;
4282 * Check how many clusters we had reserved for this allocated range
4284 reserved_clusters = get_reserved_cluster_alloc(inode,
4285 map->m_lblk, allocated);
4286 if (map->m_flags & EXT4_MAP_FROM_CLUSTER) {
4287 if (reserved_clusters) {
4289 * We have clusters reserved for this range.
4290 * But since we are not doing actual allocation
4291 * and are simply using blocks from a previously
4292 * allocated cluster, we should release the
4293 * reservation and not claim quota.
4295 ext4_da_update_reserve_space(inode,
4296 reserved_clusters, 0);
4298 } else {
4299 BUG_ON(allocated_clusters < reserved_clusters);
4300 if (reserved_clusters < allocated_clusters) {
4301 struct ext4_inode_info *ei = EXT4_I(inode);
4302 int reservation = allocated_clusters -
4303 reserved_clusters;
4305 * It seems we claimed a few clusters outside of
4306 * the range of this allocation. We should give
4307 * them back to the reservation pool. This can
4308 * happen in the following case:
4310 * * Suppose s_cluster_ratio is 4 (i.e., each
4311 * cluster has 4 blocks). Thus, the clusters
4312 * are [0-3], [4-7], [8-11], ...
4313 * * First comes a delayed allocation write for
4314 * logical blocks 10 & 11. Since there were no
4315 * previous delayed allocated blocks in the
4316 * range [8-11], we would reserve 1 cluster
4317 * for this write.
4318 * * Next comes a write for logical blocks 3 to 8.
4319 * In this case, we will reserve 2 clusters
4320 * (for [0-3] and [4-7]), and not for [8-11], as
4321 * that range already has delayed allocated blocks.
4322 * Thus, total reserved clusters now becomes 3.
4323 * * Now, during the delayed allocation writeout
4324 * time, we will first write blocks [3-8] and
4325 * allocate 3 clusters for writing these
4326 * blocks. Also, we would claim all three of
4327 * these clusters.
4328 * * Now, when we come here to write out
4329 * blocks [10-11], we would expect to claim
4330 * the reservation of 1 cluster we had made
4331 * (and we would claim it, since there are no
4332 * more delayed allocated blocks in the range
4333 * [8-11]). But our reserved cluster count had
4334 * already gone to 0.
4336 * Thus, at step 4 above, when we determine
4337 * that there are still some unwritten delayed
4338 * allocated blocks outside of our current
4339 * block range, we should increment the
4340 * reserved clusters count so that when the
4341 * remaining blocks finally get written, we
4342 * can claim them.
4344 dquot_reserve_block(inode,
4345 EXT4_C2B(sbi, reservation));
4346 spin_lock(&ei->i_block_reservation_lock);
4347 ei->i_reserved_data_blocks += reservation;
4348 spin_unlock(&ei->i_block_reservation_lock);
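/*
 * Editor's illustration (not part of the original file): the scenario
 * from the comment above, worked numerically with s_cluster_ratio == 4.
 */
#if 0
static void reserved_cluster_example(void)
{
	int pool;	/* reserved clusters (i_reserved_data_blocks) */

	pool = 1;	/* delayed write to blocks 10-11 reserves [8-11] */
	pool += 2;	/* delayed write to blocks 3-8 reserves [0-3], [4-7] */

	/*
	 * Writeout of blocks 3-8: allocated_clusters == 3 but only 2
	 * clusters were reserved for this range, so the code above
	 * re-reserves the difference, keeping [8-11]'s reservation alive:
	 */
	pool += 3 - 2;	/* reservation = allocated - reserved */
	pool -= 3;	/* claimed via ext4_da_update_reserve_space() */

	/* writeout of blocks 10-11 can now claim its reserved cluster */
	pool -= 1;	/* pool == 0; it never went negative */
}
#endif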
4351 * We will claim quota for all newly allocated blocks.
4352 * We're updating the reserved space *after* the
4353 * correction above so we do not accidentally free
4354 * all the metadata reservation because we might
4355 * actually need it later on.
4357 ext4_da_update_reserve_space(inode, allocated_clusters,
4363 * Cache the extent and update transaction to commit on fdatasync only
4364 * when it is _not_ an uninitialized extent.
4366 if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0)
4367 ext4_update_inode_fsync_trans(handle, inode, 1);
4368 else
4369 ext4_update_inode_fsync_trans(handle, inode, 0);
4370 out:
4371 if (allocated > map->m_len)
4372 allocated = map->m_len;
4373 ext4_ext_show_leaf(inode, path);
4374 map->m_flags |= EXT4_MAP_MAPPED;
4375 map->m_pblk = newblock;
4376 map->m_len = allocated;
4377 out2:
4378 if (path) {
4379 ext4_ext_drop_refs(path);
4380 kfree(path);
4383 out3:
4384 trace_ext4_ext_map_blocks_exit(inode, map, err ? err : allocated);
4386 return err ? err : allocated;
4389 void ext4_ext_truncate(handle_t *handle, struct inode *inode)
4391 struct super_block *sb = inode->i_sb;
4392 ext4_lblk_t last_block;
4393 int err = 0;
4396 * TODO: optimization is possible here.
4397 * Probably we need not scan at all,
4398 * because page truncation is enough.
4401 /* we have to know where to truncate from in crash case */
4402 EXT4_I(inode)->i_disksize = inode->i_size;
4403 ext4_mark_inode_dirty(handle, inode);
4405 last_block = (inode->i_size + sb->s_blocksize - 1)
4406 >> EXT4_BLOCK_SIZE_BITS(sb);
4407 err = ext4_es_remove_extent(inode, last_block,
4408 EXT_MAX_BLOCKS - last_block);
4409 err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCKS - 1);
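/*
 * Editor's note: last_block above is plain ceiling division. E.g. with
 * a 4096-byte block size and i_size == 5000, last_block ==
 * (5000 + 4095) >> 12 == 2, so blocks 2 and up are removed from the
 * extent tree while blocks 0-1, which still hold data, survive.
 */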
4412 static void ext4_falloc_update_inode(struct inode *inode,
4413 int mode, loff_t new_size, int update_ctime)
4415 struct timespec now;
4417 if (update_ctime) {
4418 now = current_fs_time(inode->i_sb);
4419 if (!timespec_equal(&inode->i_ctime, &now))
4420 inode->i_ctime = now;
4423 * Update only when preallocation was requested beyond
4424 * the file size.
4426 if (!(mode & FALLOC_FL_KEEP_SIZE)) {
4427 if (new_size > i_size_read(inode))
4428 i_size_write(inode, new_size);
4429 if (new_size > EXT4_I(inode)->i_disksize)
4430 ext4_update_i_disksize(inode, new_size);
4431 } else {
4433 * Mark that we allocate beyond EOF so the subsequent truncate
4434 * can proceed even if the new size is the same as i_size.
4436 if (new_size > i_size_read(inode))
4437 ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
4443 * preallocate space for a file. This implements ext4's fallocate file
4444 * operation, which gets called from the sys_fallocate system call.
4445 * For block-mapped files, posix_fallocate should fall back to the method
4446 * of writing zeroes to the required new blocks (the same behavior which is
4447 * expected for file systems which do not support fallocate() system call).
4449 long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4451 struct inode *inode = file_inode(file);
4452 handle_t *handle;
4453 loff_t new_size;
4454 unsigned int max_blocks;
4455 int ret = 0;
4456 int ret2 = 0;
4457 int retries = 0;
4458 int flags;
4459 struct ext4_map_blocks map;
4460 unsigned int credits, blkbits = inode->i_blkbits;
4462 /* Return error if mode is not supported */
4463 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
4464 return -EOPNOTSUPP;
4466 if (mode & FALLOC_FL_PUNCH_HOLE)
4467 return ext4_punch_hole(file, offset, len);
4469 ret = ext4_convert_inline_data(inode);
4470 if (ret)
4471 return ret;
4474 * currently supporting (pre)allocate mode for extent-based
4475 * files _only_
4477 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4478 return -EOPNOTSUPP;
4480 trace_ext4_fallocate_enter(inode, offset, len, mode);
4481 map.m_lblk = offset >> blkbits;
4483 * We can't just convert len to max_blocks because the range may
4484 * straddle a block boundary: e.g. blocksize = 4096, offset = 3072, len = 2048
4486 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
4487 - map.m_lblk;
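/*
 * Editor's note, working the example above: with blkbits == 12,
 * m_lblk = 3072 >> 12 == 0 and max_blocks =
 * (ALIGN(3072 + 2048, 4096) >> 12) - 0 == 2, so two blocks are
 * needed even though len is smaller than one block.
 */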
4489 * credits to insert 1 extent into extent tree
4491 credits = ext4_chunk_trans_blocks(inode, max_blocks);
4492 mutex_lock(&inode->i_mutex);
4493 ret = inode_newsize_ok(inode, (len + offset));
4494 if (ret) {
4495 mutex_unlock(&inode->i_mutex);
4496 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
4497 return ret;
4499 flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
4500 if (mode & FALLOC_FL_KEEP_SIZE)
4501 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4503 * Don't normalize the request if it can fit in one extent so
4504 * that it doesn't get unnecessarily split into multiple
4505 * extents.
4507 if (len <= EXT_UNINIT_MAX_LEN << blkbits)
4508 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
4510 retry:
4511 while (ret >= 0 && ret < max_blocks) {
4512 map.m_lblk = map.m_lblk + ret;
4513 map.m_len = max_blocks = max_blocks - ret;
4514 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
4515 credits);
4516 if (IS_ERR(handle)) {
4517 ret = PTR_ERR(handle);
4518 break;
4520 ret = ext4_map_blocks(handle, inode, &map, flags);
4521 if (ret <= 0) {
4522 #ifdef EXT4FS_DEBUG
4523 ext4_warning(inode->i_sb,
4524 "inode #%lu: block %u: len %u: "
4525 "ext4_ext_map_blocks returned %d",
4526 inode->i_ino, map.m_lblk,
4527 map.m_len, ret);
4528 #endif
4529 ext4_mark_inode_dirty(handle, inode);
4530 ret2 = ext4_journal_stop(handle);
4531 break;
4533 if ((map.m_lblk + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
4534 blkbits) >> blkbits))
4535 new_size = offset + len;
4536 else
4537 new_size = ((loff_t) map.m_lblk + ret) << blkbits;
4539 ext4_falloc_update_inode(inode, mode, new_size,
4540 (map.m_flags & EXT4_MAP_NEW));
4541 ext4_mark_inode_dirty(handle, inode);
4542 if ((file->f_flags & O_SYNC) && ret >= max_blocks)
4543 ext4_handle_sync(handle);
4544 ret2 = ext4_journal_stop(handle);
4545 if (ret2)
4546 break;
4548 if (ret == -ENOSPC &&
4549 ext4_should_retry_alloc(inode->i_sb, &retries)) {
4550 ret = 0;
4551 goto retry;
4553 mutex_unlock(&inode->i_mutex);
4554 trace_ext4_fallocate_exit(inode, offset, max_blocks,
4555 ret > 0 ? ret2 : ret);
4556 return ret > 0 ? ret2 : ret;
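/*
 * Editor's illustration (not part of the original file): a userspace
 * sketch of the two modes handled above; error handling is elided.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>

static void fallocate_example(int fd)
{
	/* allocate the first 1 MiB, extending i_size if needed */
	fallocate(fd, 0, 0, 1 << 20);
	/* preallocate another 1 MiB past EOF without touching i_size */
	fallocate(fd, FALLOC_FL_KEEP_SIZE, 1 << 20, 1 << 20);
}
#endif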
4560 * This function converts a range of blocks to written extents.
4561 * The caller of this function will pass the start offset and the size;
4562 * all unwritten extents within this range will be converted to
4563 * written extents.
4565 * This function is called from the direct IO end_io callback
4566 * function, to convert the fallocated extents after IO is completed.
4567 * Returns 0 on success.
4569 int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
4570 ssize_t len)
4572 handle_t *handle;
4573 unsigned int max_blocks;
4574 int ret = 0;
4575 int ret2 = 0;
4576 struct ext4_map_blocks map;
4577 unsigned int credits, blkbits = inode->i_blkbits;
4579 map.m_lblk = offset >> blkbits;
4581 * We can't just convert len to max_blocks because the range may
4582 * straddle a block boundary (see the example in ext4_fallocate())
4584 max_blocks = ((EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) -
4585 map.m_lblk);
4587 * credits to insert 1 extent into extent tree
4589 credits = ext4_chunk_trans_blocks(inode, max_blocks);
4590 while (ret >= 0 && ret < max_blocks) {
4591 map.m_lblk += ret;
4592 map.m_len = (max_blocks -= ret);
4593 handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, credits);
4594 if (IS_ERR(handle)) {
4595 ret = PTR_ERR(handle);
4596 break;
4598 ret = ext4_map_blocks(handle, inode, &map,
4599 EXT4_GET_BLOCKS_IO_CONVERT_EXT);
4600 if (ret <= 0)
4601 ext4_warning(inode->i_sb,
4602 "inode #%lu: block %u: len %u: "
4603 "ext4_ext_map_blocks returned %d",
4604 inode->i_ino, map.m_lblk,
4605 map.m_len, ret);
4606 ext4_mark_inode_dirty(handle, inode);
4607 ret2 = ext4_journal_stop(handle);
4608 if (ret <= 0 || ret2)
4609 break;
4611 return ret > 0 ? ret2 : ret;
4615 * If newes is not an existing extent (newes->es_pblk equals zero), find
4616 * a delayed extent at the start of newes, update newes accordingly, and
4617 * return the start of the next delayed extent.
4619 * If newes is an existing extent (newes->es_pblk is not zero), return
4620 * the start of the next delayed extent, or EXT_MAX_BLOCKS if no delayed
4621 * extent is found. Leave newes unmodified.
4623 static int ext4_find_delayed_extent(struct inode *inode,
4624 struct extent_status *newes)
4626 struct extent_status es;
4627 ext4_lblk_t block, next_del;
4629 if (newes->es_pblk == 0) {
4630 ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
4631 newes->es_lblk + newes->es_len - 1, &es);
4634 * No extent in the extent tree maps block @newes->es_lblk,
4635 * so the block may lie in 1) a hole or 2) a delayed extent.
4637 if (es.es_len == 0)
4638 /* A hole found. */
4639 return 0;
4641 if (es.es_lblk > newes->es_lblk) {
4642 /* A hole found. */
4643 newes->es_len = min(es.es_lblk - newes->es_lblk,
4644 newes->es_len);
4645 return 0;
4648 newes->es_len = es.es_lblk + es.es_len - newes->es_lblk;
4651 block = newes->es_lblk + newes->es_len;
4652 ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
4653 if (es.es_len == 0)
4654 next_del = EXT_MAX_BLOCKS;
4655 else
4656 next_del = es.es_lblk;
4658 return next_del;
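/*
 * Editor's note, a concrete run of the unmapped case above: for
 * newes = {es_lblk = 100, es_len = 50, es_pblk = 0} and a delayed
 * extent covering [120, 169], the lookup returns es_lblk == 120 >
 * 100, so newes->es_len is trimmed to min(120 - 100, 50) == 20 and
 * 0 is returned: blocks [100, 119] form a hole.
 */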
4660 /* fiemap flags we can handle are specified here */
4661 #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
4663 static int ext4_xattr_fiemap(struct inode *inode,
4664 struct fiemap_extent_info *fieinfo)
4666 __u64 physical = 0;
4667 __u64 length;
4668 __u32 flags = FIEMAP_EXTENT_LAST;
4669 int blockbits = inode->i_sb->s_blocksize_bits;
4670 int error = 0;
4672 /* in-inode? */
4673 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
4674 struct ext4_iloc iloc;
4675 int offset; /* offset of xattr in inode */
4677 error = ext4_get_inode_loc(inode, &iloc);
4678 if (error)
4679 return error;
4680 physical = (__u64)iloc.bh->b_blocknr << blockbits;
4681 offset = EXT4_GOOD_OLD_INODE_SIZE +
4682 EXT4_I(inode)->i_extra_isize;
4683 physical += offset;
4684 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
4685 flags |= FIEMAP_EXTENT_DATA_INLINE;
4686 brelse(iloc.bh);
4687 } else { /* external block */
4688 physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits;
4689 length = inode->i_sb->s_blocksize;
4692 if (physical)
4693 error = fiemap_fill_next_extent(fieinfo, 0, physical,
4694 length, flags);
4695 return (error < 0 ? error : 0);
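/*
 * Editor's note, a worked example for the in-inode branch above, with
 * illustrative numbers: blocksize 4096 (blockbits == 12), s_inode_size
 * == 256 and i_extra_isize == 32. If the inode lives in block 1061,
 * then (EXT4_GOOD_OLD_INODE_SIZE == 128):
 *   physical = (1061 << 12) + 128 + 32 == 4346016
 *   length   = 256 - (128 + 32)        == 96 bytes of in-inode xattr
 * space, reported as a single DATA_INLINE | LAST fiemap extent.
 */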
4698 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4699 __u64 start, __u64 len)
4701 ext4_lblk_t start_blk;
4702 int error = 0;
4704 if (ext4_has_inline_data(inode)) {
4705 int has_inline = 1;
4707 error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline);
4709 if (has_inline)
4710 return error;
4713 /* fall back to generic_block_fiemap() if not in extents format */
4714 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4715 return generic_block_fiemap(inode, fieinfo, start, len,
4716 ext4_get_block);
4718 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
4719 return -EBADR;
4721 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
4722 error = ext4_xattr_fiemap(inode, fieinfo);
4723 } else {
4724 ext4_lblk_t len_blks;
4725 __u64 last_blk;
4727 start_blk = start >> inode->i_sb->s_blocksize_bits;
4728 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
4729 if (last_blk >= EXT_MAX_BLOCKS)
4730 last_blk = EXT_MAX_BLOCKS-1;
4731 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
4734 * Walk the extent tree gathering extent information
4735 * and pushing extents back to the user.
4737 error = ext4_fill_fiemap_extents(inode, start_blk,
4738 len_blks, fieinfo);
4741 return error;