ext4: Clear the EXT4_EOFBLOCKS_FL flag only when warranted
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/falloc.h>
#include <asm/uaccess.h>
#include <linux/fiemap.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"
/*
 * ext_pblock:
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ex->ee_start_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
	return block;
}

/*
 * idx_pblock:
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ix->ei_leaf_lo);
	block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
	return block;
}

/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
	ex->ee_start_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}

/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
	ix->ei_leaf_lo = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
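
/*
 * Example: the on-disk format splits a 48-bit physical block number
 * into a 32-bit low word and a 16-bit high word.  For an extent with
 * ee_start_hi = 0x0012 and ee_start_lo = 0x89abcdef, ext_pblock()
 * reassembles (0x0012ULL << 32) | 0x89abcdef = 0x001289abcdef.  The
 * "(... << 31) << 1" form equals "<< 32" but avoids shifting a 32-bit
 * value by its full width.
 */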
static int ext4_ext_truncate_extend_restart(handle_t *handle,
					    struct inode *inode,
					    int needed)
{
	int err;

	if (!ext4_handle_valid(handle))
		return 0;
	if (handle->h_buffer_credits > needed)
		return 0;
	err = ext4_journal_extend(handle, needed);
	if (err <= 0)
		return err;
	err = ext4_truncate_restart_trans(handle, inode, needed);
	/*
	 * We have dropped i_data_sem so someone might have cached again
	 * an extent we are going to truncate.
	 */
	ext4_ext_invalidate_cache(inode);

	return err;
}
/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 */
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}

/*
 * could return:
 *  - EROFS
 *  - ENOMEM
 *  - EIO
 */
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;
	if (path->p_bh) {
		/* path points to block */
		err = ext4_handle_dirty_metadata(handle, inode, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
				       struct ext4_ext_path *path,
				       ext4_lblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;
	ext4_grpblk_t colour;
	ext4_group_t block_group;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		ex = path[depth].p_ext;
		if (ex)
			return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = (block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
		le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour + block;
}
/*
 * Allocation for a meta data block
 */
static ext4_fsblk_t
ext4_ext_new_meta_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err);
	return newblock;
}
static inline int ext4_ext_space_block(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 6)
			size = 6;
#endif
	}
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode, int check)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 5)
			size = 5;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 3)
			size = 3;
#endif
	}
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
	if (!check) {
#ifdef AGGRESSIVE_TEST
		if (size > 4)
			size = 4;
#endif
	}
	return size;
}
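
/*
 * Example capacities, assuming a 4 KiB block size (the header, extent
 * and index records are 12 bytes each, and i_data is 60 bytes):
 *
 *	ext4_ext_space_block()     = (4096 - 12) / 12 = 340 extents
 *	ext4_ext_space_block_idx() = (4096 - 12) / 12 = 340 indexes
 *	ext4_ext_space_root()      = (60 - 12) / 12   =   4 extents
 *	ext4_ext_space_root_idx()  = (60 - 12) / 12   =   4 indexes
 *
 * The AGGRESSIVE_TEST caps shrink these limits so that tree-growing
 * paths can be exercised with small files.
 */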
/*
 * Calculate the number of metadata blocks needed
 * to allocate @blocks
 * Worst case is one block per extent
 */
int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	int idxs, num = 0;

	idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
		/ sizeof(struct ext4_extent_idx));

	/*
	 * If the new delayed allocation block is contiguous with the
	 * previous da block, it can share index blocks with the
	 * previous block, so we only need to allocate a new index
	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
	 * an additional index block, and at idxs**3 blocks, yet
	 * another index block.
	 */
	if (ei->i_da_metadata_calc_len &&
	    ei->i_da_metadata_calc_last_lblock+1 == lblock) {
		if ((ei->i_da_metadata_calc_len % idxs) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
			num++;
		if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
			num++;
			ei->i_da_metadata_calc_len = 0;
		} else
			ei->i_da_metadata_calc_len++;
		ei->i_da_metadata_calc_last_lblock++;
		return num;
	}

	/*
	 * In the worst case we need a new set of index blocks at
	 * every level of the inode's extent tree.
	 */
	ei->i_da_metadata_calc_len = 1;
	ei->i_da_metadata_calc_last_lblock = lblock;
	return ext_depth(inode) + 1;
}
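
/*
 * Example, assuming a 4 KiB block size: idxs = (4096 - 12) / 12 = 340,
 * so a contiguous delayed-allocation run charges one extra index block
 * every 340 leaf blocks, a second every 340^2, and a third every
 * 340^3; a discontiguous block instead reserves ext_depth(inode) + 1
 * blocks for a worst-case split at every level.
 */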
static int
ext4_ext_max_entries(struct inode *inode, int depth)
{
	int max;

	if (depth == ext_depth(inode)) {
		if (depth == 0)
			max = ext4_ext_space_root(inode, 1);
		else
			max = ext4_ext_space_root_idx(inode, 1);
	} else {
		if (depth == 0)
			max = ext4_ext_space_block(inode, 1);
		else
			max = ext4_ext_space_block_idx(inode, 1);
	}

	return max;
}
static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
	ext4_fsblk_t block = ext_pblock(ext);
	int len = ext4_ext_get_actual_len(ext);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}

static int ext4_valid_extent_idx(struct inode *inode,
				struct ext4_extent_idx *ext_idx)
{
	ext4_fsblk_t block = idx_pblock(ext_idx);

	return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1);
}
static int ext4_valid_extent_entries(struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	struct ext4_extent *ext;
	struct ext4_extent_idx *ext_idx;
	unsigned short entries;
	if (eh->eh_entries == 0)
		return 1;

	entries = le16_to_cpu(eh->eh_entries);

	if (depth == 0) {
		/* leaf entries */
		ext = EXT_FIRST_EXTENT(eh);
		while (entries) {
			if (!ext4_valid_extent(inode, ext))
				return 0;
			ext++;
			entries--;
		}
	} else {
		ext_idx = EXT_FIRST_INDEX(eh);
		while (entries) {
			if (!ext4_valid_extent_idx(inode, ext_idx))
				return 0;
			ext_idx++;
			entries--;
		}
	}
	return 1;
}
static int __ext4_ext_check(const char *function, struct inode *inode,
				struct ext4_extent_header *eh,
				int depth)
{
	const char *error_msg;
	int max = 0;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_depth) != depth)) {
		error_msg = "unexpected eh_depth";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	max = ext4_ext_max_entries(inode, depth);
	if (unlikely(le16_to_cpu(eh->eh_max) > max)) {
		error_msg = "too large eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	if (!ext4_valid_extent_entries(inode, eh, depth)) {
		error_msg = "invalid extent entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error(inode->i_sb, function,
			"bad header/extent in inode #%lu: %s - magic %x, "
			"entries %u, max %u(%u), depth %u(%u)",
			inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			max, le16_to_cpu(eh->eh_depth), depth);

	return -EIO;
}

#define ext4_ext_check(inode, eh, depth)	\
	__ext4_ext_check(__func__, inode, eh, depth)

int ext4_ext_check_inode(struct inode *inode)
{
	return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
}
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug("  %d->%llu", le32_to_cpu(path->p_idx->ei_block),
				  idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug("  %d:[%d]%d:%llu ",
				  le32_to_cpu(path->p_ext->ee_block),
				  ext4_ext_is_uninitialized(path->p_ext),
				  ext4_ext_get_actual_len(path->p_ext),
				  ext_pblock(path->p_ext));
		} else
			ext_debug("  []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	ext_debug("Displaying leaf extents for inode %lu\n", inode->i_ino);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
			  ext4_ext_is_uninitialized(ex),
			  ext4_ext_get_actual_len(ex), ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode, path)
#define ext4_ext_show_leaf(inode, path)
#endif
void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}
/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch_idx(struct inode *inode,
			struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	ext_debug("binsearch for %u(idx):  ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_LAST_INDEX(eh);
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ei_block),
				m, le32_to_cpu(m->ei_block),
				r, le32_to_cpu(r->ei_block));
	}

	path->p_idx = l - 1;
	ext_debug("  -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
		  idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk(KERN_DEBUG "k=%d, ix=0x%p, "
				       "first=0x%p\n", k,
				       ix, EXT_FIRST_INDEX(eh));
				printk(KERN_DEBUG "%u <= %u\n",
				       le32_to_cpu(ix->ei_block),
				       le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					   <= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif
}
/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 * the header must be checked before calling this
 */
static void
ext4_ext_binsearch(struct inode *inode,
		struct ext4_ext_path *path, ext4_lblk_t block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %u:  ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_LAST_EXTENT(eh);

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, le32_to_cpu(l->ee_block),
				m, le32_to_cpu(m->ee_block),
				r, le32_to_cpu(r->ee_block));
	}

	path->p_ext = l - 1;
	ext_debug("  -> %d:%llu:[%d]%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext_pblock(path->p_ext),
			ext4_ext_is_uninitialized(path->p_ext),
			ext4_ext_get_actual_len(path->p_ext));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					  <= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif
}
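
/*
 * Both searches above share one invariant: they leave the path
 * pointing at the rightmost entry whose start does not exceed the
 * target.  E.g. for a leaf with extents starting at logical blocks 0,
 * 100 and 200, a lookup of block 150 sets path->p_ext to the extent
 * beginning at 100.
 */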
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode, 0));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
					struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	path[0].p_hdr = eh;
	path[0].p_bh = NULL;

	i = depth;
	/* walk through the tree */
	while (i) {
		int need_to_validate = 0;

		ext_debug("depth %d: num %d, max %d\n",
			  ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));

		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_getblk(inode->i_sb, path[ppos].p_block);
		if (unlikely(!bh))
			goto err;
		if (!bh_uptodate_or_lock(bh)) {
			if (bh_submit_read(bh) < 0) {
				put_bh(bh);
				goto err;
			}
			/* validate the extent entries */
			need_to_validate = 1;
		}
		eh = ext_block_hdr(bh);
		ppos++;
		BUG_ON(ppos > depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (need_to_validate && ext4_ext_check(inode, eh, i))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);
	/* if not an empty leaf */
	if (path[ppos].p_ext)
		path[ppos].p_block = ext_pblock(path[ppos].p_ext);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
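
/*
 * The returned path is an array indexed by level: path[0] caches the
 * root header kept in the inode's i_data, path[depth] describes the
 * leaf, and each intermediate element pins the buffer_head of one
 * index block until ext4_ext_drop_refs() releases it.
 */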
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		return err;

	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %llu. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %llu. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	le16_add_cpu(&curp->p_hdr->eh_entries, 1);

	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
			     > le16_to_cpu(curp->p_hdr->eh_max));
	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kzalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_meta_block(handle, inode, path,
						   newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	BUG_ON(newblock == 0);
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err)
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext_pblock(path[depth].p_ext),
				ext4_ext_is_uninitialized(path[depth].p_ext),
				ext4_ext_get_actual_len(path[depth].p_ext),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		le16_add_cpu(&neh->eh_entries, m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto cleanup;
		le16_add_cpu(&path[depth].p_hdr->eh_entries, -m);
		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto cleanup;
	}

	/* create intermediate indexes */
	k = depth - at - 1;
	BUG_ON(k < 0);
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		err = ext4_journal_get_create_access(handle, bh);
		if (err)
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block %llu): %u -> %llu\n",
				i, newblock, le32_to_cpu(border), oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;

		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
				EXT_LAST_INDEX(path[i].p_hdr));
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%llu in new index %llu\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			neh->eh_entries++;
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			le16_add_cpu(&neh->eh_entries, m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		err = ext4_handle_dirty_metadata(handle, inode, bh);
		if (err)
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			le16_add_cpu(&path[i].p_hdr->eh_entries, -m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
				    le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, ablocks[i], 1, 1);
		}
	}
	kfree(ablocks);

	return err;
}
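
/*
 * Sketch of a split at depth 2 with at = 1: depth - at = 1, so a
 * single new leaf is allocated and tracked in ablocks[], the extents
 * right of the split point migrate into it, and the new leaf is hooked
 * into the free index slot at level 1 via ext4_ext_insert_index().  On
 * any failure, every block recorded in ablocks[] is freed again.
 */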
/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	err = ext4_journal_get_create_access(handle, bh);
	if (err) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max the right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode, 0));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	err = ext4_handle_dirty_metadata(handle, inode, bh);
	if (err)
		goto out;

	/* create index in new top-level index: num,max,pointer */
	err = ext4_ext_get_access(handle, inode, curp);
	if (err)
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode, 0));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);

	if (path[0].p_hdr->eh_depth)
		curp->p_idx->ei_block =
			EXT_FIRST_INDEX(path[0].p_hdr)->ei_block;
	else
		curp->p_idx->ei_block =
			EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr %llu\n",
		  le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
		  le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}
/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				    (ext4_lblk_t)le32_to_cpu(newext->ee_block),
				    path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}
/*
 * search for the closest allocated block to the left of *logical
 * and return it at @logical, with its physical address at @phys
 * if *logical is the smallest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_left(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	int depth, ee_len;

	BUG_ON(path == NULL);
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
		}
		return 0;
	}

	BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));

	*logical = le32_to_cpu(ex->ee_block) + ee_len - 1;
	*phys = ext_pblock(ex) + ee_len - 1;
	return 0;
}
/*
 * search for the closest allocated block to the right of *logical
 * and return it at @logical, with its physical address at @phys
 * if *logical is the largest allocated block, the function
 * returns 0 at @phys
 * return value contains 0 (success) or error code
 */
int
ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
			ext4_lblk_t *logical, ext4_fsblk_t *phys)
{
	struct buffer_head *bh = NULL;
	struct ext4_extent_header *eh;
	struct ext4_extent_idx *ix;
	struct ext4_extent *ex;
	ext4_fsblk_t block;
	int depth;	/* Note, NOT eh_depth; depth from top of tree */
	int ee_len;

	BUG_ON(path == NULL);
	depth = path->p_depth;
	*phys = 0;

	if (depth == 0 && path->p_ext == NULL)
		return 0;

	/* usually extent in the path covers blocks smaller
	 * than *logical, but it can be that extent is the
	 * first one in the file */

	ex = path[depth].p_ext;
	ee_len = ext4_ext_get_actual_len(ex);
	if (*logical < le32_to_cpu(ex->ee_block)) {
		BUG_ON(EXT_FIRST_EXTENT(path[depth].p_hdr) != ex);
		while (--depth >= 0) {
			ix = path[depth].p_idx;
			BUG_ON(ix != EXT_FIRST_INDEX(path[depth].p_hdr));
		}
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	BUG_ON(*logical < (le32_to_cpu(ex->ee_block) + ee_len));

	if (ex != EXT_LAST_EXTENT(path[depth].p_hdr)) {
		/* next allocated block in this leaf */
		ex++;
		*logical = le32_to_cpu(ex->ee_block);
		*phys = ext_pblock(ex);
		return 0;
	}

	/* go up and search for index to the right */
	while (--depth >= 0) {
		ix = path[depth].p_idx;
		if (ix != EXT_LAST_INDEX(path[depth].p_hdr))
			goto got_index;
	}

	/* we've gone up to the root and found no index to the right */
	return 0;

got_index:
	/* we've found index to the right, let's
	 * follow it and find the closest allocated
	 * block to the right */
	ix++;
	block = idx_pblock(ix);
	while (++depth < path->p_depth) {
		bh = sb_bread(inode->i_sb, block);
		if (bh == NULL)
			return -EIO;
		eh = ext_block_hdr(bh);
		/* subtract from p_depth to get proper eh_depth */
		if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
			put_bh(bh);
			return -EIO;
		}
		ix = EXT_FIRST_INDEX(eh);
		block = idx_pblock(ix);
		put_bh(bh);
	}

	bh = sb_bread(inode->i_sb, block);
	if (bh == NULL)
		return -EIO;
	eh = ext_block_hdr(bh);
	if (ext4_ext_check(inode, eh, path->p_depth - depth)) {
		put_bh(bh);
		return -EIO;
	}
	ex = EXT_FIRST_EXTENT(eh);
	*logical = le32_to_cpu(ex->ee_block);
	*phys = ext_pblock(ex);
	put_bh(bh);
	return 0;
}
/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static ext4_lblk_t
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}
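
/*
 * Example: if the path ends at an extent covering blocks 10..19 and
 * the enclosing index block has a further entry starting at block 50,
 * this returns 50; with nothing to the right at any level it returns
 * EXT_MAX_BLOCK, which callers read as "no next allocation".
 */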
/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static ext4_lblk_t ext4_ext_next_leaf_block(struct inode *inode,
					struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return (ext4_lblk_t)
				le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}
/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
static int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	BUG_ON(ex == NULL);
	BUG_ON(eh == NULL);

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	err = ext4_ext_get_access(handle, inode, path + k);
	if (err)
		return err;
	path[k].p_idx->ei_block = border;
	err = ext4_ext_dirty(handle, inode, path + k);
	if (err)
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		err = ext4_ext_get_access(handle, inode, path + k);
		if (err)
			break;
		path[k].p_idx->ei_block = border;
		err = ext4_ext_dirty(handle, inode, path + k);
		if (err)
			break;
	}

	return err;
}
int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	unsigned short ext1_ee_len, ext2_ee_len, max_len;

	/*
	 * Make sure that either both extents are uninitialized, or
	 * both are _not_.
	 */
	if (ext4_ext_is_uninitialized(ex1) ^ ext4_ext_is_uninitialized(ex2))
		return 0;

	if (ext4_ext_is_uninitialized(ex1))
		max_len = EXT_UNINIT_MAX_LEN;
	else
		max_len = EXT_INIT_MAX_LEN;

	ext1_ee_len = ext4_ext_get_actual_len(ex1);
	ext2_ee_len = ext4_ext_get_actual_len(ex2);

	if (le32_to_cpu(ex1->ee_block) + ext1_ee_len !=
			le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (ext1_ee_len + ext2_ee_len > max_len)
		return 0;
#ifdef AGGRESSIVE_TEST
	if (ext1_ee_len >= 4)
		return 0;
#endif

	if (ext_pblock(ex1) + ext1_ee_len == ext_pblock(ex2))
		return 1;
	return 0;
}
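
/*
 * The max_len cap exists because the top bit of ee_len flags an
 * uninitialized extent: an initialized extent may span at most
 * EXT_INIT_MAX_LEN (32768) blocks and an uninitialized one at most
 * EXT_UNINIT_MAX_LEN (32767).  Merging, say, 20000 + 15000 initialized
 * blocks is therefore refused, since 35000 would spill into that bit.
 */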
/*
 * This function tries to merge the "ex" extent to the next extent in the tree.
 * It always tries to merge towards right. If you want to merge towards
 * left, pass "ex - 1" as argument instead of "ex".
 * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns
 * 1 if they got merged.
 */
int ext4_ext_try_to_merge(struct inode *inode,
			  struct ext4_ext_path *path,
			  struct ext4_extent *ex)
{
	struct ext4_extent_header *eh;
	unsigned int depth, len;
	int merge_done = 0;
	int uninitialized = 0;

	depth = ext_depth(inode);
	BUG_ON(path[depth].p_hdr == NULL);
	eh = path[depth].p_hdr;

	while (ex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
			break;
		/* merge with next extent! */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
				+ ext4_ext_get_actual_len(ex + 1));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);

		if (ex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - ex - 1)
				* sizeof(struct ext4_extent);
			memmove(ex + 1, ex + 2, len);
		}
		le16_add_cpu(&eh->eh_entries, -1);
		merge_done = 1;
		WARN_ON(eh->eh_entries == 0);
		if (!eh->eh_entries)
			ext4_error(inode->i_sb, "ext4_ext_try_to_merge",
			   "inode#%lu, eh->eh_entries = 0!", inode->i_ino);
	}

	return merge_done;
}
/*
 * check if a portion of the "newext" extent overlaps with an
 * existing extent.
 *
 * If there is an overlap discovered, it updates the length of the newext
 * such that there will be no overlap, and then returns 1.
 * If there is no overlap found, it returns 0.
 */
unsigned int ext4_ext_check_overlap(struct inode *inode,
				    struct ext4_extent *newext,
				    struct ext4_ext_path *path)
{
	ext4_lblk_t b1, b2;
	unsigned int depth, len1;
	unsigned int ret = 0;

	b1 = le32_to_cpu(newext->ee_block);
	len1 = ext4_ext_get_actual_len(newext);
	depth = ext_depth(inode);
	if (!path[depth].p_ext)
		goto out;
	b2 = le32_to_cpu(path[depth].p_ext->ee_block);

	/*
	 * get the next allocated block if the extent in the path
	 * is before the requested block(s)
	 */
	if (b2 < b1) {
		b2 = ext4_ext_next_allocated_block(path);
		if (b2 == EXT_MAX_BLOCK)
			goto out;
	}

	/* check for wrap through zero on extent logical start block */
	if (b1 + len1 < b1) {
		len1 = EXT_MAX_BLOCK - b1;
		newext->ee_len = cpu_to_le16(len1);
		ret = 1;
	}

	/* check for overlap */
	if (b1 + len1 > b2) {
		newext->ee_len = cpu_to_le16(b2 - b1);
		ret = 1;
	}
out:
	return ret;
}
/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int flag)
{
	struct ext4_extent_header *eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err;
	ext4_lblk_t next;
	unsigned uninitialized = 0;

	BUG_ON(ext4_ext_get_actual_len(newext) == 0);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	BUG_ON(path[depth].p_hdr == NULL);

	/* try to insert block into found extent and return */
	if (ex && (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT)
		&& ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append [%d]%d block to %d:[%d]%d (from %llu)\n",
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext),
				le32_to_cpu(ex->ee_block),
				ext4_ext_is_uninitialized(ex),
				ext4_ext_get_actual_len(ex), ext_pblock(ex));
		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			return err;

		/*
		 * ext4_can_extents_be_merged should have checked that either
		 * both extents are uninitialized, or both aren't. Thus we
		 * need to check only one of them here.
		 */
		if (ext4_ext_is_uninitialized(ex))
			uninitialized = 1;
		ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
					+ ext4_ext_get_actual_len(newext));
		if (uninitialized)
			ext4_ext_mark_uninitialized(ex);
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
				  le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
			  le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	err = ext4_ext_get_access(handle, inode, path + depth);
	if (err)
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:%llu:[%d]%d\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
			   > le32_to_cpu(nearex->ee_block)) {
/*		BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:%llu:[%d]%d after: nearest 0x%p, "
					"move %d from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext_pblock(newext),
					ext4_ext_is_uninitialized(newext),
					ext4_ext_get_actual_len(newext),
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:%llu:[%d]%d before: nearest 0x%p, "
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				ext4_ext_is_uninitialized(newext),
				ext4_ext_get_actual_len(newext),
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	le16_add_cpu(&eh->eh_entries, 1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	ext4_ext_store_pblock(nearex, ext_pblock(newext));
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
	if (flag != EXT4_GET_BLOCKS_DIO_CREATE_EXT)
		ext4_ext_try_to_merge(inode, path, nearex);

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_invalidate_cache(inode);
	return err;
}
int ext4_ext_walk_space(struct inode *inode, ext4_lblk_t block,
			ext4_lblk_t num, ext_prepare_callback func,
			void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	ext4_lblk_t next, start = 0, end = 0;
	ext4_lblk_t last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
		down_read(&EXT4_I(inode)->i_data_sem);
		path = ext4_ext_find_extent(inode, block, path);
		up_read(&EXT4_I(inode)->i_data_sem);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		BUG_ON(path[depth].p_hdr == NULL);
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >= le32_to_cpu(ex->ee_block)
					+ ext4_ext_get_actual_len(ex)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block)
				+ ext4_ext_get_actual_len(ex);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = ext4_ext_get_actual_len(ex);
			cbex.ec_start = ext_pblock(ex);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
		}

		BUG_ON(cbex.ec_len == 0);
		err = func(inode, path, &cbex, ex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;

		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}
static void
ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
			__u32 len, ext4_fsblk_t start, int type)
{
	struct ext4_ext_cache *cex;
	BUG_ON(len == 0);
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
}
/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				ext4_lblk_t block)
{
	int depth = ext_depth(inode);
	unsigned long len;
	ext4_lblk_t lblock;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %u [%u:%u]",
				block,
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex)) {
		ext4_lblk_t next;
		lblock = le32_to_cpu(ex->ee_block)
			+ ext4_ext_get_actual_len(ex);

		next = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%u:%u] %u",
				le32_to_cpu(ex->ee_block),
				ext4_ext_get_actual_len(ex),
				block);
		BUG_ON(next == lblock);
		len = next - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %u:%lu\n", lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}
static int
ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;
	int ret = EXT4_EXT_CACHE_NO;

	/*
	 * We borrow i_block_reservation_lock to protect i_cached_extent
	 */
	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
	cex = &EXT4_I(inode)->i_cached_extent;

	/* has cache valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)
		goto errout;

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%u cached by %u:%u:%llu\n",
				block,
				cex->ec_block, cex->ec_len, cex->ec_start);
		ret = cex->ec_type;
	}
errout:
	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
	return ret;
}
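
/*
 * i_cached_extent is a single-slot, per-inode cache guarded by
 * i_block_reservation_lock.  Example: with {ec_block = 100,
 * ec_len = 50} cached, a lookup of block 120 hits (100 <= 120 < 150)
 * and fills *ex, while a lookup of block 160 misses and returns
 * EXT4_EXT_CACHE_NO.
 */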
/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * last index in the block only.
 */
static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path)
{
	struct buffer_head *bh;
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = idx_pblock(path->p_idx);
	BUG_ON(path->p_hdr->eh_entries == 0);
	err = ext4_ext_get_access(handle, inode, path);
	if (err)
		return err;
	le16_add_cpu(&path->p_hdr->eh_entries, -1);
	err = ext4_ext_dirty(handle, inode, path);
	if (err)
		return err;
	ext_debug("index is empty, remove it, free block %llu\n", leaf);
	bh = sb_find_get_block(inode->i_sb, leaf);
	ext4_forget(handle, 1, inode, bh, leaf);
	ext4_free_blocks(handle, inode, leaf, 1, 1);
	return err;
}
/*
 * ext4_ext_calc_credits_for_single_extent:
 * This routine returns the maximum number of credits needed to insert
 * an extent into the extent tree.
 * When passing the actual path, the caller should calculate credits
 * under i_data_sem.
 */
int ext4_ext_calc_credits_for_single_extent(struct inode *inode, int nrblocks,
						struct ext4_ext_path *path)
{
	if (path) {
		int depth = ext_depth(inode);
		int ret = 0;

		/* probably there is space in leaf? */
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max)) {

			/*
			 *  There is some space in the leaf, so there is no
			 *  need to account for the leaf block credit;
			 *
			 *  bitmaps and block group descriptor blocks
			 *  and other metadata blocks still need to be
			 *  accounted.
			 */
			/* 1 bitmap, 1 block group descriptor */
			ret = 2 + EXT4_META_TRANS_BLOCKS(inode->i_sb);
			return ret;
		}
	}

	return ext4_chunk_trans_blocks(inode, nrblocks);
}
/*
 * How many index/leaf blocks need to change/allocate to modify nrblocks?
 *
 * If nrblocks fit in a single extent (chunk flag is 1), then in the
 * worst case each tree-level index/leaf needs to be changed; if the
 * tree splits due to inserting a new extent, the old tree index/leaf
 * blocks need to be updated too.
 *
 * If the nrblocks are discontiguous, they could cause
 * the whole tree to split more than once, but this is really rare.
 */
int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
	int index;
	int depth = ext_depth(inode);

	if (chunk)
		index = depth * 2;
	else
		index = depth * 3;

	return index;
}
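
/*
 * Example: at tree depth 3 a single contiguous chunk is charged
 * 3 * 2 = 6 index/leaf credits (each level changed once, plus the old
 * blocks touched by a possible split), while discontiguous blocks are
 * charged 3 * 3 = 9 to allow for splitting more than once.
 */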
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
				struct ext4_extent *ex,
				ext4_lblk_t from, ext4_lblk_t to)
{
	struct buffer_head *bh;
	unsigned short ee_len = ext4_ext_get_actual_len(ex);
	int i, metadata = 0;

	if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
		metadata = 1;
#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
		/* tail removal */
		ext4_lblk_t num;
		ext4_fsblk_t start;

		num = le32_to_cpu(ex->ee_block) + ee_len - from;
		start = ext_pblock(ex) + ee_len - num;
		ext_debug("free last %u blocks starting %llu\n", num, start);
		for (i = 0; i < num; i++) {
			bh = sb_find_get_block(inode->i_sb, start + i);
			ext4_forget(handle, metadata, inode, bh, start + i);
		}
		ext4_free_blocks(handle, inode, start, num, metadata);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
		printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), ee_len);
	} else {
		printk(KERN_INFO "strange request: removal(2) "
				"%u-%u from %u:%u\n",
				from, to, le32_to_cpu(ex->ee_block), ee_len);
	}
	return 0;
}
2109 static int
2110 ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2111 struct ext4_ext_path *path, ext4_lblk_t start)
2113 int err = 0, correct_index = 0;
2114 int depth = ext_depth(inode), credits;
2115 struct ext4_extent_header *eh;
2116 ext4_lblk_t a, b, block;
2117 unsigned num;
2118 ext4_lblk_t ex_ee_block;
2119 unsigned short ex_ee_len;
2120 unsigned uninitialized = 0;
2121 struct ext4_extent *ex;
2123 /* the header must be checked already in ext4_ext_remove_space() */
2124 ext_debug("truncate since %u in leaf\n", start);
2125 if (!path[depth].p_hdr)
2126 path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
2127 eh = path[depth].p_hdr;
2128 BUG_ON(eh == NULL);
2130 /* find where to start removing */
2131 ex = EXT_LAST_EXTENT(eh);
2133 ex_ee_block = le32_to_cpu(ex->ee_block);
2134 ex_ee_len = ext4_ext_get_actual_len(ex);
2136 while (ex >= EXT_FIRST_EXTENT(eh) &&
2137 ex_ee_block + ex_ee_len > start) {
2139 if (ext4_ext_is_uninitialized(ex))
2140 uninitialized = 1;
2141 else
2142 uninitialized = 0;
2144 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
2145 uninitialized, ex_ee_len);
2146 path[depth].p_ext = ex;
2148 a = ex_ee_block > start ? ex_ee_block : start;
2149 b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
2150 ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;
2152 ext_debug(" border %u:%u\n", a, b);
2154 if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
2155 block = 0;
2156 num = 0;
2157 BUG();
2158 } else if (a != ex_ee_block) {
2159 /* remove tail of the extent */
2160 block = ex_ee_block;
2161 num = a - block;
2162 } else if (b != ex_ee_block + ex_ee_len - 1) {
2163 /* remove head of the extent */
2164 block = a;
2165 num = b - a;
2166 /* there is no "make a hole" API yet */
2167 BUG();
2168 } else {
2169 /* remove whole extent: excellent! */
2170 block = ex_ee_block;
2171 num = 0;
2172 BUG_ON(a != ex_ee_block);
2173 BUG_ON(b != ex_ee_block + ex_ee_len - 1);
2177 * 3 for leaf, sb, and inode plus 2 (bmap and group
2178 * descriptor) for each block group; assume two block
2179 * groups plus ex_ee_len/blocks_per_block_group for
2180 * the worst case
2182 credits = 7 + 2*(ex_ee_len/EXT4_BLOCKS_PER_GROUP(inode->i_sb));
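/*
 * Illustrative note (not part of the original source): with 4KB blocks
 * a block group holds 32768 blocks, so freeing a 100000-block extent
 * budgets 7 + 2 * (100000 / 32768) = 13 credits before the index and
 * quota adjustments below.
 */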
2183 if (ex == EXT_FIRST_EXTENT(eh)) {
2184 correct_index = 1;
2185 credits += (ext_depth(inode)) + 1;
2187 credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
2189 err = ext4_ext_truncate_extend_restart(handle, inode, credits);
2190 if (err)
2191 goto out;
2193 err = ext4_ext_get_access(handle, inode, path + depth);
2194 if (err)
2195 goto out;
2197 err = ext4_remove_blocks(handle, inode, ex, a, b);
2198 if (err)
2199 goto out;
2201 if (num == 0) {
2202 /* this extent is removed; mark slot entirely unused */
2203 ext4_ext_store_pblock(ex, 0);
2204 le16_add_cpu(&eh->eh_entries, -1);
2207 ex->ee_block = cpu_to_le32(block);
2208 ex->ee_len = cpu_to_le16(num);
2210 * Do not mark uninitialized if all the blocks in the
2211 * extent have been removed.
2213 if (uninitialized && num)
2214 ext4_ext_mark_uninitialized(ex);
2216 err = ext4_ext_dirty(handle, inode, path + depth);
2217 if (err)
2218 goto out;
2220 ext_debug("new extent: %u:%u:%llu\n", block, num,
2221 ext_pblock(ex));
2222 ex--;
2223 ex_ee_block = le32_to_cpu(ex->ee_block);
2224 ex_ee_len = ext4_ext_get_actual_len(ex);
2227 if (correct_index && eh->eh_entries)
2228 err = ext4_ext_correct_indexes(handle, inode, path);
2230 /* if this leaf is free, then we should
2231 * remove it from index block above */
2232 if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
2233 err = ext4_ext_rm_idx(handle, inode, path + depth);
2235 out:
2236 return err;
2240 * ext4_ext_more_to_rm:
2241 * returns 1 if current index has to be freed (even partial)
2243 static int
2244 ext4_ext_more_to_rm(struct ext4_ext_path *path)
2246 BUG_ON(path->p_idx == NULL);
2248 if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
2249 return 0;
2252 * if truncate on deeper level happened, it wasn't partial,
2253 * so we have to consider current index for truncation
2255 if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
2256 return 0;
2257 return 1;
2260 static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start)
2262 struct super_block *sb = inode->i_sb;
2263 int depth = ext_depth(inode);
2264 struct ext4_ext_path *path;
2265 handle_t *handle;
2266 int i = 0, err = 0;
2268 ext_debug("truncate since %u\n", start);
2270 /* probably first extent we're gonna free will be last in block */
2271 handle = ext4_journal_start(inode, depth + 1);
2272 if (IS_ERR(handle))
2273 return PTR_ERR(handle);
2275 ext4_ext_invalidate_cache(inode);
2278 * We start scanning from right side, freeing all the blocks
2279 * after i_size and walking into the tree depth-wise.
2281 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
2282 if (path == NULL) {
2283 ext4_journal_stop(handle);
2284 return -ENOMEM;
2286 path[0].p_hdr = ext_inode_hdr(inode);
2287 if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
2288 err = -EIO;
2289 goto out;
2291 path[0].p_depth = depth;
2293 while (i >= 0 && err == 0) {
2294 if (i == depth) {
2295 /* this is leaf block */
2296 err = ext4_ext_rm_leaf(handle, inode, path, start);
2297 /* root level has p_bh == NULL, brelse() eats this */
2298 brelse(path[i].p_bh);
2299 path[i].p_bh = NULL;
2300 i--;
2301 continue;
2304 /* this is index block */
2305 if (!path[i].p_hdr) {
2306 ext_debug("initialize header\n");
2307 path[i].p_hdr = ext_block_hdr(path[i].p_bh);
2310 if (!path[i].p_idx) {
2311 /* this level hasn't been touched yet */
2312 path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
2313 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
2314 ext_debug("init index ptr: hdr 0x%p, num %d\n",
2315 path[i].p_hdr,
2316 le16_to_cpu(path[i].p_hdr->eh_entries));
2317 } else {
2318 /* we were already here, so look at the next index */
2319 path[i].p_idx--;
2322 ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
2323 i, EXT_FIRST_INDEX(path[i].p_hdr),
2324 path[i].p_idx);
2325 if (ext4_ext_more_to_rm(path + i)) {
2326 struct buffer_head *bh;
2327 /* go to the next level */
2328 ext_debug("move to level %d (block %llu)\n",
2329 i + 1, idx_pblock(path[i].p_idx));
2330 memset(path + i + 1, 0, sizeof(*path));
2331 bh = sb_bread(sb, idx_pblock(path[i].p_idx));
2332 if (!bh) {
2333 /* should we reset i_size? */
2334 err = -EIO;
2335 break;
2337 if (WARN_ON(i + 1 > depth)) {
2338 err = -EIO;
2339 break;
2341 if (ext4_ext_check(inode, ext_block_hdr(bh),
2342 depth - i - 1)) {
2343 err = -EIO;
2344 break;
2346 path[i + 1].p_bh = bh;
2348 /* save actual number of indexes since this
2349 * number is changed at the next iteration */
2350 path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
2351 i++;
2352 } else {
2353 /* we finished processing this index, go up */
2354 if (path[i].p_hdr->eh_entries == 0 && i > 0) {
2355 /* index is empty, remove it;
2356 * the handle must already have been prepared
2357 * by ext4_ext_rm_leaf() */
2358 err = ext4_ext_rm_idx(handle, inode, path + i);
2360 /* root level has p_bh == NULL, brelse() eats this */
2361 brelse(path[i].p_bh);
2362 path[i].p_bh = NULL;
2363 i--;
2364 ext_debug("return to level %d\n", i);
2368 /* TODO: flexible tree reduction should be here */
2369 if (path->p_hdr->eh_entries == 0) {
2371 * truncate to zero freed all the tree,
2372 * so we need to correct eh_depth
2374 err = ext4_ext_get_access(handle, inode, path);
2375 if (err == 0) {
2376 ext_inode_hdr(inode)->eh_depth = 0;
2377 ext_inode_hdr(inode)->eh_max =
2378 cpu_to_le16(ext4_ext_space_root(inode, 0));
2379 err = ext4_ext_dirty(handle, inode, path);
2382 out:
2383 ext4_ext_drop_refs(path);
2384 kfree(path);
2385 ext4_journal_stop(handle);
2387 return err;
2391 * called at mount time
2393 void ext4_ext_init(struct super_block *sb)
2396 * possible initialization would be here
2399 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
2400 #if defined(AGGRESSIVE_TEST) || defined(CHECK_BINSEARCH) || defined(EXTENTS_STATS)
2401 printk(KERN_INFO "EXT4-fs: file extents enabled");
2402 #ifdef AGGRESSIVE_TEST
2403 printk(", aggressive tests");
2404 #endif
2405 #ifdef CHECK_BINSEARCH
2406 printk(", check binsearch");
2407 #endif
2408 #ifdef EXTENTS_STATS
2409 printk(", stats");
2410 #endif
2411 printk("\n");
2412 #endif
2413 #ifdef EXTENTS_STATS
2414 spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
2415 EXT4_SB(sb)->s_ext_min = 1 << 30;
2416 EXT4_SB(sb)->s_ext_max = 0;
2417 #endif
2422 * called at umount time
2424 void ext4_ext_release(struct super_block *sb)
2426 if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
2427 return;
2429 #ifdef EXTENTS_STATS
2430 if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
2431 struct ext4_sb_info *sbi = EXT4_SB(sb);
2432 printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
2433 sbi->s_ext_blocks, sbi->s_ext_extents,
2434 sbi->s_ext_blocks / sbi->s_ext_extents);
2435 printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
2436 sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
2438 #endif
2441 static void bi_complete(struct bio *bio, int error)
2443 complete((struct completion *)bio->bi_private);
2446 /* FIXME!! we need to try to merge to left or right after zero-out */
2447 static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
2449 int ret;
2450 struct bio *bio;
2451 int blkbits, blocksize;
2452 sector_t ee_pblock;
2453 struct completion event;
2454 unsigned int ee_len, len, done, offset;
2457 blkbits = inode->i_blkbits;
2458 blocksize = inode->i_sb->s_blocksize;
2459 ee_len = ext4_ext_get_actual_len(ex);
2460 ee_pblock = ext_pblock(ex);
2462 /* convert ee_pblock to 512 byte sectors */
2463 ee_pblock = ee_pblock << (blkbits - 9);
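/*
 * Illustrative note (not part of the original source): with 4KB
 * filesystem blocks (blkbits == 12), each block spans 2^(12 - 9) = 8
 * sectors of 512 bytes, so fs block N starts at sector N * 8.
 */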
2465 while (ee_len > 0) {
2467 if (ee_len > BIO_MAX_PAGES)
2468 len = BIO_MAX_PAGES;
2469 else
2470 len = ee_len;
2472 bio = bio_alloc(GFP_NOIO, len);
2473 if (!bio)
2474 return -ENOMEM;
2476 bio->bi_sector = ee_pblock;
2477 bio->bi_bdev = inode->i_sb->s_bdev;
2479 done = 0;
2480 offset = 0;
2481 while (done < len) {
2482 ret = bio_add_page(bio, ZERO_PAGE(0),
2483 blocksize, offset);
2484 if (ret != blocksize) {
2486 * We can't add any more pages because of
2487 * hardware limitations. Start a new bio.
2489 break;
2491 done++;
2492 offset += blocksize;
2493 if (offset >= PAGE_CACHE_SIZE)
2494 offset = 0;
2497 init_completion(&event);
2498 bio->bi_private = &event;
2499 bio->bi_end_io = bi_complete;
2500 submit_bio(WRITE, bio);
2501 wait_for_completion(&event);
2503 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
2504 bio_put(bio);
2505 return -EIO;
2507 bio_put(bio);
2508 ee_len -= done;
2509 ee_pblock += done << (blkbits - 9);
2511 return 0;
2514 #define EXT4_EXT_ZERO_LEN 7
2516 * This function is called by ext4_ext_get_blocks() if someone tries to write
2517 * to an uninitialized extent. It may result in splitting the uninitialized
2518 * extent into multiple extents (up to three - one initialized and two
2519 * uninitialized).
2520 * There are three possibilities:
2521 * a> There is no split required: the entire extent should be initialized
2522 * b> Split into two extents: the write happens at either end of the extent
2523 * c> Split into three extents: someone is writing in the middle of the extent
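/*
 * Illustrative note (not part of the original source), a worked instance
 * of case c> above: given an uninitialized extent covering logical blocks
 * [100..199] and a write to [130..150], the split yields
 * ex1 = [100..129] (uninitialized), ex2 = [130..150] (initialized) and
 * ex3 = [151..199] (uninitialized).
 */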
2525 static int ext4_ext_convert_to_initialized(handle_t *handle,
2526 struct inode *inode,
2527 struct ext4_ext_path *path,
2528 ext4_lblk_t iblock,
2529 unsigned int max_blocks)
2531 struct ext4_extent *ex, newex, orig_ex;
2532 struct ext4_extent *ex1 = NULL;
2533 struct ext4_extent *ex2 = NULL;
2534 struct ext4_extent *ex3 = NULL;
2535 struct ext4_extent_header *eh;
2536 ext4_lblk_t ee_block, eof_block;
2537 unsigned int allocated, ee_len, depth;
2538 ext4_fsblk_t newblock;
2539 int err = 0;
2540 int ret = 0;
2541 int may_zeroout;
2543 ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical "
2544 "block %llu, max_blocks %u\n", inode->i_ino,
2545 (unsigned long long)iblock, max_blocks);
2547 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
2548 inode->i_sb->s_blocksize_bits;
2549 if (eof_block < iblock + max_blocks)
2550 eof_block = iblock + max_blocks;
2552 depth = ext_depth(inode);
2553 eh = path[depth].p_hdr;
2554 ex = path[depth].p_ext;
2555 ee_block = le32_to_cpu(ex->ee_block);
2556 ee_len = ext4_ext_get_actual_len(ex);
2557 allocated = ee_len - (iblock - ee_block);
2558 newblock = iblock - ee_block + ext_pblock(ex);
2560 ex2 = ex;
2561 orig_ex.ee_block = ex->ee_block;
2562 orig_ex.ee_len = cpu_to_le16(ee_len);
2563 ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
2566 * It is safe to convert the extent to initialized via explicit
2567 * zeroout only if the extent is fully inside i_size or new_size.
2569 may_zeroout = ee_block + ee_len <= eof_block;
2571 err = ext4_ext_get_access(handle, inode, path + depth);
2572 if (err)
2573 goto out;
2574 /* If the extent is at most 2*EXT4_EXT_ZERO_LEN blocks, zero it out directly */
2575 if (ee_len <= 2*EXT4_EXT_ZERO_LEN && may_zeroout) {
2576 err = ext4_ext_zeroout(inode, &orig_ex);
2577 if (err)
2578 goto fix_extent_len;
2579 /* update the extent length and mark as initialized */
2580 ex->ee_block = orig_ex.ee_block;
2581 ex->ee_len = orig_ex.ee_len;
2582 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2583 ext4_ext_dirty(handle, inode, path + depth);
2584 /* zeroed the full extent */
2585 return allocated;
2588 /* ex1: ee_block to iblock - 1 : uninitialized */
2589 if (iblock > ee_block) {
2590 ex1 = ex;
2591 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2592 ext4_ext_mark_uninitialized(ex1);
2593 ex2 = &newex;
2596 * for sanity, update the length of the ex2 extent before
2597 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2598 * overlap of blocks.
2600 if (!ex1 && allocated > max_blocks)
2601 ex2->ee_len = cpu_to_le16(max_blocks);
2602 /* ex3: to ee_block + ee_len : uninitialised */
2603 if (allocated > max_blocks) {
2604 unsigned int newdepth;
2605 /* If at most EXT4_EXT_ZERO_LEN blocks remain, zero them out directly */
2606 if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) {
2608 * iblock == ee_block is handled by the zeroout
2609 * at the beginning.
2610 * Mark first half uninitialized.
2611 * Mark second half initialized and zero out the
2612 * initialized extent
2614 ex->ee_block = orig_ex.ee_block;
2615 ex->ee_len = cpu_to_le16(ee_len - allocated);
2616 ext4_ext_mark_uninitialized(ex);
2617 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2618 ext4_ext_dirty(handle, inode, path + depth);
2620 ex3 = &newex;
2621 ex3->ee_block = cpu_to_le32(iblock);
2622 ext4_ext_store_pblock(ex3, newblock);
2623 ex3->ee_len = cpu_to_le16(allocated);
2624 err = ext4_ext_insert_extent(handle, inode, path,
2625 ex3, 0);
2626 if (err == -ENOSPC) {
2627 err = ext4_ext_zeroout(inode, &orig_ex);
2628 if (err)
2629 goto fix_extent_len;
2630 ex->ee_block = orig_ex.ee_block;
2631 ex->ee_len = orig_ex.ee_len;
2632 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2633 ext4_ext_dirty(handle, inode, path + depth);
2634 /* blocks available from iblock */
2635 return allocated;
2637 } else if (err)
2638 goto fix_extent_len;
2641 * We need to zero out the second half because
2642 * a fallocate request can update the file size and
2643 * converting the second half to an initialized extent
2644 * implies that we could leak some junk data to user
2645 * space.
2647 err = ext4_ext_zeroout(inode, ex3);
2648 if (err) {
2650 * We should actually mark the
2651 * second half as uninit and return the error;
2652 * the insert would have changed the extent.
2654 depth = ext_depth(inode);
2655 ext4_ext_drop_refs(path);
2656 path = ext4_ext_find_extent(inode,
2657 iblock, path);
2658 if (IS_ERR(path)) {
2659 err = PTR_ERR(path);
2660 return err;
2662 /* get the second half extent details */
2663 ex = path[depth].p_ext;
2664 err = ext4_ext_get_access(handle, inode,
2665 path + depth);
2666 if (err)
2667 return err;
2668 ext4_ext_mark_uninitialized(ex);
2669 ext4_ext_dirty(handle, inode, path + depth);
2670 return err;
2673 /* zeroed the second half */
2674 return allocated;
2676 ex3 = &newex;
2677 ex3->ee_block = cpu_to_le32(iblock + max_blocks);
2678 ext4_ext_store_pblock(ex3, newblock + max_blocks);
2679 ex3->ee_len = cpu_to_le16(allocated - max_blocks);
2680 ext4_ext_mark_uninitialized(ex3);
2681 err = ext4_ext_insert_extent(handle, inode, path, ex3, 0);
2682 if (err == -ENOSPC && may_zeroout) {
2683 err = ext4_ext_zeroout(inode, &orig_ex);
2684 if (err)
2685 goto fix_extent_len;
2686 /* update the extent length and mark as initialized */
2687 ex->ee_block = orig_ex.ee_block;
2688 ex->ee_len = orig_ex.ee_len;
2689 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2690 ext4_ext_dirty(handle, inode, path + depth);
2691 /* zeroed the full extent */
2692 /* blocks available from iblock */
2693 return allocated;
2695 } else if (err)
2696 goto fix_extent_len;
2698 * The depth, and hence eh & ex might change
2699 * as part of the insert above.
2701 newdepth = ext_depth(inode);
2703 * update the extent length after successful insert of the
2704 * split extent
2706 ee_len -= ext4_ext_get_actual_len(ex3);
2707 orig_ex.ee_len = cpu_to_le16(ee_len);
2708 may_zeroout = ee_block + ee_len <= eof_block;
2710 depth = newdepth;
2711 ext4_ext_drop_refs(path);
2712 path = ext4_ext_find_extent(inode, iblock, path);
2713 if (IS_ERR(path)) {
2714 err = PTR_ERR(path);
2715 goto out;
2717 eh = path[depth].p_hdr;
2718 ex = path[depth].p_ext;
2719 if (ex2 != &newex)
2720 ex2 = ex;
2722 err = ext4_ext_get_access(handle, inode, path + depth);
2723 if (err)
2724 goto out;
2726 allocated = max_blocks;
2728 /* If the extent is at most EXT4_EXT_ZERO_LEN blocks and we are
2729 * trying to insert an extent in the middle, zero it out directly;
2730 * otherwise give the extent a chance to merge to the left
2732 if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN &&
2733 iblock != ee_block && may_zeroout) {
2734 err = ext4_ext_zeroout(inode, &orig_ex);
2735 if (err)
2736 goto fix_extent_len;
2737 /* update the extent length and mark as initialized */
2738 ex->ee_block = orig_ex.ee_block;
2739 ex->ee_len = orig_ex.ee_len;
2740 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2741 ext4_ext_dirty(handle, inode, path + depth);
2742 /* zero out the first half */
2743 /* blocks available from iblock */
2744 return allocated;
2748 * If there was a change of depth as part of the
2749 * insertion of ex3 above, we need to update the length
2750 * of the ex1 extent again here
2752 if (ex1 && ex1 != ex) {
2753 ex1 = ex;
2754 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2755 ext4_ext_mark_uninitialized(ex1);
2756 ex2 = &newex;
2758 /* ex2: iblock to iblock + maxblocks-1 : initialised */
2759 ex2->ee_block = cpu_to_le32(iblock);
2760 ext4_ext_store_pblock(ex2, newblock);
2761 ex2->ee_len = cpu_to_le16(allocated);
2762 if (ex2 != ex)
2763 goto insert;
2765 * New (initialized) extent starts from the first block
2766 * in the current extent. i.e., ex2 == ex
2767 * We have to see if it can be merged with the extent
2768 * on the left.
2770 if (ex2 > EXT_FIRST_EXTENT(eh)) {
2772 * To merge left, pass "ex2 - 1" to try_to_merge(),
2773 * since it merges towards right _only_.
2775 ret = ext4_ext_try_to_merge(inode, path, ex2 - 1);
2776 if (ret) {
2777 err = ext4_ext_correct_indexes(handle, inode, path);
2778 if (err)
2779 goto out;
2780 depth = ext_depth(inode);
2781 ex2--;
2785 * Try to merge towards the right. This might be required
2786 * only when the whole extent is being written to.
2787 * i.e. ex2 == ex and ex3 == NULL.
2789 if (!ex3) {
2790 ret = ext4_ext_try_to_merge(inode, path, ex2);
2791 if (ret) {
2792 err = ext4_ext_correct_indexes(handle, inode, path);
2793 if (err)
2794 goto out;
2797 /* Mark modified extent as dirty */
2798 err = ext4_ext_dirty(handle, inode, path + depth);
2799 goto out;
2800 insert:
2801 err = ext4_ext_insert_extent(handle, inode, path, &newex, 0);
2802 if (err == -ENOSPC && may_zeroout) {
2803 err = ext4_ext_zeroout(inode, &orig_ex);
2804 if (err)
2805 goto fix_extent_len;
2806 /* update the extent length and mark as initialized */
2807 ex->ee_block = orig_ex.ee_block;
2808 ex->ee_len = orig_ex.ee_len;
2809 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2810 ext4_ext_dirty(handle, inode, path + depth);
2811 /* zero out the first half */
2812 return allocated;
2813 } else if (err)
2814 goto fix_extent_len;
2815 out:
2816 ext4_ext_show_leaf(inode, path);
2817 return err ? err : allocated;
2819 fix_extent_len:
2820 ex->ee_block = orig_ex.ee_block;
2821 ex->ee_len = orig_ex.ee_len;
2822 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2823 ext4_ext_mark_uninitialized(ex);
2824 ext4_ext_dirty(handle, inode, path + depth);
2825 return err;
2829 * This function is called by ext4_ext_get_blocks() from
2830 * ext4_get_blocks_dio_write() when DIO is used to write
2831 * to an uninitialized extent.
2833 * Writing to an uninitialized extent may result in splitting it
2834 * into multiple initialized/uninitialized extents (up to three).
2835 * There are three possibilities:
2836 * a> There is no split required: the entire extent stays uninitialized
2837 * b> Split into two extents: the write happens at either end of the extent
2838 * c> Split into three extents: someone is writing in the middle of the extent
2840 * One or more index blocks may be needed if the extent tree grows after
2841 * the uninitialized extent is split. To prevent ENOSPC from occurring at
2842 * IO completion time, we need to split the uninitialized extent before
2843 * submitting the IO. The uninitialized extent in question will be split
2844 * into (at most) three uninitialized extents. After the IO completes, the
2845 * part being filled will be converted to initialized by the end_io callback
2846 * via ext4_convert_unwritten_extents().
2848 * Returns the size of the uninitialized extent to be written on success.
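/*
 * Illustrative note (not part of the original source): unlike
 * ext4_ext_convert_to_initialized() above, all resulting pieces stay
 * uninitialized here. E.g. an async DIO write to [130..150] of an
 * uninitialized extent [100..199] yields three uninitialized extents
 * [100..129], [130..150] and [151..199]; only the middle one is
 * converted to written by the end_io callback once the IO completes.
 */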
2850 static int ext4_split_unwritten_extents(handle_t *handle,
2851 struct inode *inode,
2852 struct ext4_ext_path *path,
2853 ext4_lblk_t iblock,
2854 unsigned int max_blocks,
2855 int flags)
2857 struct ext4_extent *ex, newex, orig_ex;
2858 struct ext4_extent *ex1 = NULL;
2859 struct ext4_extent *ex2 = NULL;
2860 struct ext4_extent *ex3 = NULL;
2861 struct ext4_extent_header *eh;
2862 ext4_lblk_t ee_block, eof_block;
2863 unsigned int allocated, ee_len, depth;
2864 ext4_fsblk_t newblock;
2865 int err = 0;
2866 int may_zeroout;
2868 ext_debug("ext4_split_unwritten_extents: inode %lu, logical "
2869 "block %llu, max_blocks %u\n", inode->i_ino,
2870 (unsigned long long)iblock, max_blocks);
2872 eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
2873 inode->i_sb->s_blocksize_bits;
2874 if (eof_block < iblock + max_blocks)
2875 eof_block = iblock + max_blocks;
2877 depth = ext_depth(inode);
2878 eh = path[depth].p_hdr;
2879 ex = path[depth].p_ext;
2880 ee_block = le32_to_cpu(ex->ee_block);
2881 ee_len = ext4_ext_get_actual_len(ex);
2882 allocated = ee_len - (iblock - ee_block);
2883 newblock = iblock - ee_block + ext_pblock(ex);
2885 ex2 = ex;
2886 orig_ex.ee_block = ex->ee_block;
2887 orig_ex.ee_len = cpu_to_le16(ee_len);
2888 ext4_ext_store_pblock(&orig_ex, ext_pblock(ex));
2891 * It is safe to convert the extent to initialized via explicit
2892 * zeroout only if the extent is fully inside i_size or new_size.
2894 may_zeroout = ee_block + ee_len <= eof_block;
2897 * If the uninitialized extent begins at the same logical
2898 * block where the write begins, and the write completely
2899 * covers the extent, then we don't need to split it.
2901 if ((iblock == ee_block) && (allocated <= max_blocks))
2902 return allocated;
2904 err = ext4_ext_get_access(handle, inode, path + depth);
2905 if (err)
2906 goto out;
2907 /* ex1: ee_block to iblock - 1 : uninitialized */
2908 if (iblock > ee_block) {
2909 ex1 = ex;
2910 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2911 ext4_ext_mark_uninitialized(ex1);
2912 ex2 = &newex;
2915 * for sanity, update the length of the ex2 extent before
2916 * we insert ex3, if ex1 is NULL. This is to avoid temporary
2917 * overlap of blocks.
2919 if (!ex1 && allocated > max_blocks)
2920 ex2->ee_len = cpu_to_le16(max_blocks);
2921 /* ex3: to ee_block + ee_len : uninitialised */
2922 if (allocated > max_blocks) {
2923 unsigned int newdepth;
2924 ex3 = &newex;
2925 ex3->ee_block = cpu_to_le32(iblock + max_blocks);
2926 ext4_ext_store_pblock(ex3, newblock + max_blocks);
2927 ex3->ee_len = cpu_to_le16(allocated - max_blocks);
2928 ext4_ext_mark_uninitialized(ex3);
2929 err = ext4_ext_insert_extent(handle, inode, path, ex3, flags);
2930 if (err == -ENOSPC && may_zeroout) {
2931 err = ext4_ext_zeroout(inode, &orig_ex);
2932 if (err)
2933 goto fix_extent_len;
2934 /* update the extent length and mark as initialized */
2935 ex->ee_block = orig_ex.ee_block;
2936 ex->ee_len = orig_ex.ee_len;
2937 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
2938 ext4_ext_dirty(handle, inode, path + depth);
2939 /* zeroed the full extent */
2940 /* blocks available from iblock */
2941 return allocated;
2943 } else if (err)
2944 goto fix_extent_len;
2946 * The depth, and hence eh & ex might change
2947 * as part of the insert above.
2949 newdepth = ext_depth(inode);
2951 * update the extent length after successful insert of the
2952 * split extent
2954 ee_len -= ext4_ext_get_actual_len(ex3);
2955 orig_ex.ee_len = cpu_to_le16(ee_len);
2956 may_zeroout = ee_block + ee_len <= eof_block;
2958 depth = newdepth;
2959 ext4_ext_drop_refs(path);
2960 path = ext4_ext_find_extent(inode, iblock, path);
2961 if (IS_ERR(path)) {
2962 err = PTR_ERR(path);
2963 goto out;
2965 eh = path[depth].p_hdr;
2966 ex = path[depth].p_ext;
2967 if (ex2 != &newex)
2968 ex2 = ex;
2970 err = ext4_ext_get_access(handle, inode, path + depth);
2971 if (err)
2972 goto out;
2974 allocated = max_blocks;
2977 * If there was a change of depth as part of the
2978 * insertion of ex3 above, we need to update the length
2979 * of the ex1 extent again here
2981 if (ex1 && ex1 != ex) {
2982 ex1 = ex;
2983 ex1->ee_len = cpu_to_le16(iblock - ee_block);
2984 ext4_ext_mark_uninitialized(ex1);
2985 ex2 = &newex;
2988 * ex2: iblock to iblock + maxblocks-1 : to be written by direct IO,
2989 * still uninitialised.
2991 ex2->ee_block = cpu_to_le32(iblock);
2992 ext4_ext_store_pblock(ex2, newblock);
2993 ex2->ee_len = cpu_to_le16(allocated);
2994 ext4_ext_mark_uninitialized(ex2);
2995 if (ex2 != ex)
2996 goto insert;
2997 /* Mark modified extent as dirty */
2998 err = ext4_ext_dirty(handle, inode, path + depth);
2999 ext_debug("out here\n");
3000 goto out;
3001 insert:
3002 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3003 if (err == -ENOSPC && may_zeroout) {
3004 err = ext4_ext_zeroout(inode, &orig_ex);
3005 if (err)
3006 goto fix_extent_len;
3007 /* update the extent length and mark as initialized */
3008 ex->ee_block = orig_ex.ee_block;
3009 ex->ee_len = orig_ex.ee_len;
3010 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
3011 ext4_ext_dirty(handle, inode, path + depth);
3012 /* zero out the first half */
3013 return allocated;
3014 } else if (err)
3015 goto fix_extent_len;
3016 out:
3017 ext4_ext_show_leaf(inode, path);
3018 return err ? err : allocated;
3020 fix_extent_len:
3021 ex->ee_block = orig_ex.ee_block;
3022 ex->ee_len = orig_ex.ee_len;
3023 ext4_ext_store_pblock(ex, ext_pblock(&orig_ex));
3024 ext4_ext_mark_uninitialized(ex);
3025 ext4_ext_dirty(handle, inode, path + depth);
3026 return err;
3028 static int ext4_convert_unwritten_extents_dio(handle_t *handle,
3029 struct inode *inode,
3030 struct ext4_ext_path *path)
3032 struct ext4_extent *ex;
3033 struct ext4_extent_header *eh;
3034 int depth;
3035 int err = 0;
3036 int ret = 0;
3038 depth = ext_depth(inode);
3039 eh = path[depth].p_hdr;
3040 ex = path[depth].p_ext;
3042 err = ext4_ext_get_access(handle, inode, path + depth);
3043 if (err)
3044 goto out;
3045 /* first mark the extent as initialized */
3046 ext4_ext_mark_initialized(ex);
3049 * We have to see if it can be merged with the extent
3050 * on the left.
3052 if (ex > EXT_FIRST_EXTENT(eh)) {
3054 * To merge left, pass "ex - 1" to try_to_merge(),
3055 * since it merges towards right _only_.
3057 ret = ext4_ext_try_to_merge(inode, path, ex - 1);
3058 if (ret) {
3059 err = ext4_ext_correct_indexes(handle, inode, path);
3060 if (err)
3061 goto out;
3062 depth = ext_depth(inode);
3063 ex--;
3067 * Try to merge towards the right.
3069 ret = ext4_ext_try_to_merge(inode, path, ex);
3070 if (ret) {
3071 err = ext4_ext_correct_indexes(handle, inode, path);
3072 if (err)
3073 goto out;
3074 depth = ext_depth(inode);
3076 /* Mark modified extent as dirty */
3077 err = ext4_ext_dirty(handle, inode, path + depth);
3078 out:
3079 ext4_ext_show_leaf(inode, path);
3080 return err;
3083 static void unmap_underlying_metadata_blocks(struct block_device *bdev,
3084 sector_t block, int count)
3086 int i;
3087 for (i = 0; i < count; i++)
3088 unmap_underlying_metadata(bdev, block + i);
3091 static int
3092 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3093 ext4_lblk_t iblock, unsigned int max_blocks,
3094 struct ext4_ext_path *path, int flags,
3095 unsigned int allocated, struct buffer_head *bh_result,
3096 ext4_fsblk_t newblock)
3098 int ret = 0;
3099 int err = 0;
3100 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3102 ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
3103 "block %llu, max_blocks %u, flags %d, allocated %u\n",
3104 inode->i_ino, (unsigned long long)iblock, max_blocks,
3105 flags, allocated);
3106 ext4_ext_show_leaf(inode, path);
3108 /* DIO get_block() before submit the IO, split the extent */
3109 if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
3110 ret = ext4_split_unwritten_extents(handle,
3111 inode, path, iblock,
3112 max_blocks, flags);
3114 * Flag the inode (non-aio case) or the end_io struct (aio case)
3115 * to indicate that this IO needs conversion to written when the
3116 * IO is completed
3118 if (io)
3119 io->flag = DIO_AIO_UNWRITTEN;
3120 else
3121 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3122 goto out;
3124 /* async DIO end_io complete, convert the filled extent to written */
3125 if (flags == EXT4_GET_BLOCKS_DIO_CONVERT_EXT) {
3126 ret = ext4_convert_unwritten_extents_dio(handle, inode,
3127 path);
3128 if (ret >= 0)
3129 ext4_update_inode_fsync_trans(handle, inode, 1);
3130 goto out2;
3132 /* buffered IO case */
3134 * repeated fallocate creation request:
3135 * we already have an unwritten extent
3137 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT)
3138 goto map_out;
3140 /* buffered READ or buffered write_begin() lookup */
3141 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3143 * We have blocks reserved already. We
3144 * return allocated blocks so that delalloc
3145 * won't do block reservation for us. But
3146 * the buffer head will be unmapped so that
3147 * a read from the block returns 0s.
3149 set_buffer_unwritten(bh_result);
3150 goto out1;
3153 /* buffered write, writepage time, convert */
3154 ret = ext4_ext_convert_to_initialized(handle, inode,
3155 path, iblock,
3156 max_blocks);
3157 if (ret >= 0)
3158 ext4_update_inode_fsync_trans(handle, inode, 1);
3159 out:
3160 if (ret <= 0) {
3161 err = ret;
3162 goto out2;
3163 } else
3164 allocated = ret;
3165 set_buffer_new(bh_result);
3167 * if we allocated more blocks than requested
3168 * we need to make sure we unmap the extra block
3169 * allocated. The actual needed block will get
3170 * unmapped later when we find the buffer_head marked
3171 * new.
3173 if (allocated > max_blocks) {
3174 unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
3175 newblock + max_blocks,
3176 allocated - max_blocks);
3177 allocated = max_blocks;
3181 * If we have done fallocate at an offset that is already
3182 * delayed allocated, we would have block reservation
3183 * and quota reservation done in the delayed write path.
3184 * But fallocate would have already updated quota and block
3185 * count for this offset. So cancel these reservations.
3187 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
3188 ext4_da_update_reserve_space(inode, allocated, 0);
3190 map_out:
3191 set_buffer_mapped(bh_result);
3192 out1:
3193 if (allocated > max_blocks)
3194 allocated = max_blocks;
3195 ext4_ext_show_leaf(inode, path);
3196 bh_result->b_bdev = inode->i_sb->s_bdev;
3197 bh_result->b_blocknr = newblock;
3198 out2:
3199 if (path) {
3200 ext4_ext_drop_refs(path);
3201 kfree(path);
3203 return err ? err : allocated;
3206 * Block allocation/map/preallocation routine for extents based files
3209 * Need to be called with
3210 * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
3211 * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
3213 * return > 0, number of blocks already mapped/allocated
3214 * if create == 0 and these are pre-allocated blocks
3215 * buffer head is unmapped
3216 * otherwise blocks are mapped
3218 * return = 0, if plain look up failed (blocks have not been allocated)
3219 * buffer head is unmapped
3221 * return < 0, error case.
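/*
 * Illustrative caller sketch (not part of the original source), showing
 * how the return convention documented above is typically consumed;
 * the variables here are hypothetical.
 */
#if 0
	memset(&bh, 0, sizeof(bh));
	ret = ext4_ext_get_blocks(handle, inode, iblock, max_blocks,
				  &bh, EXT4_GET_BLOCKS_CREATE);
	if (ret > 0)
		; /* 'ret' blocks mapped starting at bh.b_blocknr */
	else if (ret == 0)
		; /* hole: nothing allocated, bh left unmapped */
	else
		; /* ret < 0: error from the allocator or the tree */
#endif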
3223 int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
3224 ext4_lblk_t iblock,
3225 unsigned int max_blocks, struct buffer_head *bh_result,
3226 int flags)
3228 struct ext4_ext_path *path = NULL;
3229 struct ext4_extent_header *eh;
3230 struct ext4_extent newex, *ex, *last_ex;
3231 ext4_fsblk_t newblock;
3232 int i, err = 0, depth, ret, cache_type;
3233 unsigned int allocated = 0;
3234 struct ext4_allocation_request ar;
3235 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio;
3237 __clear_bit(BH_New, &bh_result->b_state);
3238 ext_debug("blocks %u/%u requested for inode %lu\n",
3239 iblock, max_blocks, inode->i_ino);
3241 /* check in cache */
3242 cache_type = ext4_ext_in_cache(inode, iblock, &newex);
3243 if (cache_type) {
3244 if (cache_type == EXT4_EXT_CACHE_GAP) {
3245 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3247 * block isn't allocated yet and
3248 * user doesn't want to allocate it
3250 goto out2;
3252 /* we should allocate requested block */
3253 } else if (cache_type == EXT4_EXT_CACHE_EXTENT) {
3254 /* block is already allocated */
3255 newblock = iblock
3256 - le32_to_cpu(newex.ee_block)
3257 + ext_pblock(&newex);
3258 /* number of remaining blocks in the extent */
3259 allocated = ext4_ext_get_actual_len(&newex) -
3260 (iblock - le32_to_cpu(newex.ee_block));
3261 goto out;
3262 } else {
3263 BUG();
3267 /* find extent for this block */
3268 path = ext4_ext_find_extent(inode, iblock, NULL);
3269 if (IS_ERR(path)) {
3270 err = PTR_ERR(path);
3271 path = NULL;
3272 goto out2;
3275 depth = ext_depth(inode);
3278 * consistent leaf must not be empty;
3279 * this situation is possible, though, _during_ tree modification;
3280 * this is why the assert can't be put in ext4_ext_find_extent()
3282 if (path[depth].p_ext == NULL && depth != 0) {
3283 ext4_error(inode->i_sb, __func__, "bad extent address "
3284 "inode: %lu, iblock: %lu, depth: %d",
3285 inode->i_ino, (unsigned long) iblock, depth);
3286 err = -EIO;
3287 goto out2;
3289 eh = path[depth].p_hdr;
3291 ex = path[depth].p_ext;
3292 if (ex) {
3293 ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
3294 ext4_fsblk_t ee_start = ext_pblock(ex);
3295 unsigned short ee_len;
3298 * Uninitialized extents are treated as holes, except that
3299 * we split out initialized portions during a write.
3301 ee_len = ext4_ext_get_actual_len(ex);
3302 /* if found extent covers block, simply return it */
3303 if (iblock >= ee_block && iblock < ee_block + ee_len) {
3304 newblock = iblock - ee_block + ee_start;
3305 /* number of remaining blocks in the extent */
3306 allocated = ee_len - (iblock - ee_block);
3307 ext_debug("%u fit into %u:%d -> %llu\n", iblock,
3308 ee_block, ee_len, newblock);
3310 /* Do not put uninitialized extent in the cache */
3311 if (!ext4_ext_is_uninitialized(ex)) {
3312 ext4_ext_put_in_cache(inode, ee_block,
3313 ee_len, ee_start,
3314 EXT4_EXT_CACHE_EXTENT);
3315 goto out;
3317 ret = ext4_ext_handle_uninitialized_extents(handle,
3318 inode, iblock, max_blocks, path,
3319 flags, allocated, bh_result, newblock);
3320 return ret;
3325 * requested block isn't allocated yet;
3326 * we can't try to create the block if the create flag is zero
3328 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) {
3330 * put just found gap into cache to speed up
3331 * subsequent requests
3333 ext4_ext_put_gap_in_cache(inode, path, iblock);
3334 goto out2;
3337 * Okay, we need to do block allocation.
3340 /* find neighbour allocated blocks */
3341 ar.lleft = iblock;
3342 err = ext4_ext_search_left(inode, path, &ar.lleft, &ar.pleft);
3343 if (err)
3344 goto out2;
3345 ar.lright = iblock;
3346 err = ext4_ext_search_right(inode, path, &ar.lright, &ar.pright);
3347 if (err)
3348 goto out2;
3351 * See if request is beyond maximum number of blocks we can have in
3352 * a single extent. For an initialized extent this limit is
3353 * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
3354 * EXT_UNINIT_MAX_LEN.
3356 if (max_blocks > EXT_INIT_MAX_LEN &&
3357 !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
3358 max_blocks = EXT_INIT_MAX_LEN;
3359 else if (max_blocks > EXT_UNINIT_MAX_LEN &&
3360 (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
3361 max_blocks = EXT_UNINIT_MAX_LEN;
3363 /* Check if we can really insert (iblock)::(iblock+max_blocks) extent */
3364 newex.ee_block = cpu_to_le32(iblock);
3365 newex.ee_len = cpu_to_le16(max_blocks);
3366 err = ext4_ext_check_overlap(inode, &newex, path);
3367 if (err)
3368 allocated = ext4_ext_get_actual_len(&newex);
3369 else
3370 allocated = max_blocks;
3372 /* allocate new block */
3373 ar.inode = inode;
3374 ar.goal = ext4_ext_find_goal(inode, path, iblock);
3375 ar.logical = iblock;
3376 ar.len = allocated;
3377 if (S_ISREG(inode->i_mode))
3378 ar.flags = EXT4_MB_HINT_DATA;
3379 else
3380 /* disable in-core preallocation for non-regular files */
3381 ar.flags = 0;
3382 newblock = ext4_mb_new_blocks(handle, &ar, &err);
3383 if (!newblock)
3384 goto out2;
3385 ext_debug("allocate new block: goal %llu, found %llu/%u\n",
3386 ar.goal, newblock, allocated);
3388 /* try to insert new extent into found leaf and return */
3389 ext4_ext_store_pblock(&newex, newblock);
3390 newex.ee_len = cpu_to_le16(ar.len);
3391 /* Mark uninitialized */
3392 if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
3393 ext4_ext_mark_uninitialized(&newex);
3395 * An io_end structure is created for every async
3396 * direct IO write to the middle of the file.
3397 * To avoid unnecessary conversion for every aio dio rewrite
3398 * to the middle of the file, here we flag only the IO that
3399 * really needs the conversion.
3400 * For the non-async direct IO case, flag the inode state
3401 * to note that conversion is needed when the IO is done.
3403 if (flags == EXT4_GET_BLOCKS_DIO_CREATE_EXT) {
3404 if (io)
3405 io->flag = DIO_AIO_UNWRITTEN;
3406 else
3407 ext4_set_inode_state(inode,
3408 EXT4_STATE_DIO_UNWRITTEN);
3412 if (unlikely(ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))) {
3413 if (unlikely(!eh->eh_entries)) {
3414 ext4_error(inode->i_sb, __func__,
3415 "inode#%lu, eh->eh_entries = 0 and "
3416 "EOFBLOCKS_FL set", inode->i_ino);
3417 err = -EIO;
3418 goto out2;
3420 last_ex = EXT_LAST_EXTENT(eh);
3422 * If the current leaf block was reached by looking at
3423 * the last index block all the way down the tree, and
3424 * we are extending the inode beyond the last extent
3425 * in the current leaf block, then clear the
3426 * EOFBLOCKS_FL flag.
3428 for (i = depth-1; i >= 0; i--) {
3429 if (path[i].p_idx != EXT_LAST_INDEX(path[i].p_hdr))
3430 break;
3432 if ((i < 0) &&
3433 (iblock + ar.len > le32_to_cpu(last_ex->ee_block) +
3434 ext4_ext_get_actual_len(last_ex)))
3435 ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
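/*
 * Illustrative note (not part of the original source): in a depth-2
 * tree, if path[i].p_idx is the last index at every level (so i
 * reaches -1) and this allocation extends past the last extent in
 * the rightmost leaf, no blocks remain allocated beyond EOF and the
 * flag can safely be dropped.
 */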
3437 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
3438 if (err) {
3439 /* free data blocks we just allocated */
3440 /* not a good idea to call discard here directly,
3441 * but otherwise we'd need to call it on every free() */
3442 ext4_discard_preallocations(inode);
3443 ext4_free_blocks(handle, inode, ext_pblock(&newex),
3444 ext4_ext_get_actual_len(&newex), 0);
3445 goto out2;
3448 /* previous routine could use block we allocated */
3449 newblock = ext_pblock(&newex);
3450 allocated = ext4_ext_get_actual_len(&newex);
3451 if (allocated > max_blocks)
3452 allocated = max_blocks;
3453 set_buffer_new(bh_result);
3456 * Update reserved blocks/metadata blocks after successful
3457 * block allocation which had been deferred till now.
3459 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
3460 ext4_da_update_reserve_space(inode, allocated, 1);
3463 * Cache the extent and update transaction to commit on fdatasync only
3464 * when it is _not_ an uninitialized extent.
3466 if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0) {
3467 ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
3468 EXT4_EXT_CACHE_EXTENT);
3469 ext4_update_inode_fsync_trans(handle, inode, 1);
3470 } else
3471 ext4_update_inode_fsync_trans(handle, inode, 0);
3472 out:
3473 if (allocated > max_blocks)
3474 allocated = max_blocks;
3475 ext4_ext_show_leaf(inode, path);
3476 set_buffer_mapped(bh_result);
3477 bh_result->b_bdev = inode->i_sb->s_bdev;
3478 bh_result->b_blocknr = newblock;
3479 out2:
3480 if (path) {
3481 ext4_ext_drop_refs(path);
3482 kfree(path);
3484 return err ? err : allocated;
3487 void ext4_ext_truncate(struct inode *inode)
3489 struct address_space *mapping = inode->i_mapping;
3490 struct super_block *sb = inode->i_sb;
3491 ext4_lblk_t last_block;
3492 handle_t *handle;
3493 int err = 0;
3496 * probably first extent we're gonna free will be last in block
3498 err = ext4_writepage_trans_blocks(inode);
3499 handle = ext4_journal_start(inode, err);
3500 if (IS_ERR(handle))
3501 return;
3503 if (inode->i_size & (sb->s_blocksize - 1))
3504 ext4_block_truncate_page(handle, mapping, inode->i_size);
3506 if (ext4_orphan_add(handle, inode))
3507 goto out_stop;
3509 down_write(&EXT4_I(inode)->i_data_sem);
3510 ext4_ext_invalidate_cache(inode);
3512 ext4_discard_preallocations(inode);
3515 * TODO: optimization is possible here.
3516 * Probably we need not scan at all,
3517 * because page truncation is enough.
3520 /* we have to know where to truncate from in crash case */
3521 EXT4_I(inode)->i_disksize = inode->i_size;
3522 ext4_mark_inode_dirty(handle, inode);
3524 last_block = (inode->i_size + sb->s_blocksize - 1)
3525 >> EXT4_BLOCK_SIZE_BITS(sb);
3526 err = ext4_ext_remove_space(inode, last_block);
3528 /* In a multi-transaction truncate, we only make the final
3529 * transaction synchronous.
3531 if (IS_SYNC(inode))
3532 ext4_handle_sync(handle);
3534 out_stop:
3535 up_write(&EXT4_I(inode)->i_data_sem);
3537 * If this was a simple ftruncate() and the file will remain alive,
3538 * then we need to clear up the orphan record which we created above.
3539 * However, if this was a real unlink then we were called by
3540 * ext4_delete_inode(), and we allow that function to clean up the
3541 * orphan info for us.
3543 if (inode->i_nlink)
3544 ext4_orphan_del(handle, inode);
3546 inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3547 ext4_mark_inode_dirty(handle, inode);
3548 ext4_journal_stop(handle);
3551 static void ext4_falloc_update_inode(struct inode *inode,
3552 int mode, loff_t new_size, int update_ctime)
3554 struct timespec now;
3556 if (update_ctime) {
3557 now = current_fs_time(inode->i_sb);
3558 if (!timespec_equal(&inode->i_ctime, &now))
3559 inode->i_ctime = now;
3562 * Update only when preallocation was requested beyond
3563 * the file size.
3565 if (!(mode & FALLOC_FL_KEEP_SIZE)) {
3566 if (new_size > i_size_read(inode))
3567 i_size_write(inode, new_size);
3568 if (new_size > EXT4_I(inode)->i_disksize)
3569 ext4_update_i_disksize(inode, new_size);
3570 } else {
3572 * Mark that we allocate beyond EOF so the subsequent truncate
3573 * can proceed even if the new size is the same as i_size.
3575 if (new_size > i_size_read(inode))
3576 ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3582 * preallocate space for a file. This implements ext4's fallocate inode
3583 * operation, which gets called from the sys_fallocate system call.
3584 * For block-mapped files, posix_fallocate should fall back to the method
3585 * of writing zeroes to the required new blocks (the same behavior which is
3586 * expected for file systems which do not support fallocate() system call).
3588 long ext4_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len)
3590 handle_t *handle;
3591 ext4_lblk_t block;
3592 loff_t new_size;
3593 unsigned int max_blocks;
3594 int ret = 0;
3595 int ret2 = 0;
3596 int retries = 0;
3597 struct buffer_head map_bh;
3598 unsigned int credits, blkbits = inode->i_blkbits;
3601 * currently supporting (pre)allocate mode for extent-based
3602 * files _only_
3604 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
3605 return -EOPNOTSUPP;
3607 /* preallocation to directories is currently not supported */
3608 if (S_ISDIR(inode->i_mode))
3609 return -ENODEV;
3611 block = offset >> blkbits;
3613 * We can't just convert len to max_blocks, because the byte range
3614 * may straddle block boundaries: e.g. blocksize = 4096, offset = 3072, len = 2048
3616 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
3617 - block;
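/*
 * Illustrative note (not part of the original source): with
 * blocksize = 4096, offset = 3072 and len = 2048, the byte range
 * [3072..5119] touches blocks 0 and 1, so block = 0 and
 * max_blocks = 2 even though len >> blkbits is 0.
 */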
3619 * credits to insert 1 extent into extent tree
3621 credits = ext4_chunk_trans_blocks(inode, max_blocks);
3622 mutex_lock(&inode->i_mutex);
3623 ret = inode_newsize_ok(inode, (len + offset));
3624 if (ret) {
3625 mutex_unlock(&inode->i_mutex);
3626 return ret;
3628 retry:
3629 while (ret >= 0 && ret < max_blocks) {
3630 block = block + ret;
3631 max_blocks = max_blocks - ret;
3632 handle = ext4_journal_start(inode, credits);
3633 if (IS_ERR(handle)) {
3634 ret = PTR_ERR(handle);
3635 break;
3637 map_bh.b_state = 0;
3638 ret = ext4_get_blocks(handle, inode, block,
3639 max_blocks, &map_bh,
3640 EXT4_GET_BLOCKS_CREATE_UNINIT_EXT);
3641 if (ret <= 0) {
3642 #ifdef EXT4FS_DEBUG
3643 WARN_ON(ret <= 0);
3644 printk(KERN_ERR "%s: ext4_ext_get_blocks "
3645 "returned error inode#%lu, block=%u, "
3646 "max_blocks=%u", __func__,
3647 inode->i_ino, block, max_blocks);
3648 #endif
3649 ext4_mark_inode_dirty(handle, inode);
3650 ret2 = ext4_journal_stop(handle);
3651 break;
3653 if ((block + ret) >= (EXT4_BLOCK_ALIGN(offset + len,
3654 blkbits) >> blkbits))
3655 new_size = offset + len;
3656 else
3657 new_size = (block + ret) << blkbits;
3659 ext4_falloc_update_inode(inode, mode, new_size,
3660 buffer_new(&map_bh));
3661 ext4_mark_inode_dirty(handle, inode);
3662 ret2 = ext4_journal_stop(handle);
3663 if (ret2)
3664 break;
3666 if (ret == -ENOSPC &&
3667 ext4_should_retry_alloc(inode->i_sb, &retries)) {
3668 ret = 0;
3669 goto retry;
3671 mutex_unlock(&inode->i_mutex);
3672 return ret > 0 ? ret2 : ret;
3676 * This function converts a range of blocks to written extents.
3677 * The caller of this function will pass the start offset and the size;
3678 * all unwritten extents within this range will be converted to
3679 * written extents.
3681 * This function is called from the direct IO end_io callback
3682 * function, to convert the fallocated extents after IO is completed.
3683 * Returns 0 on success.
3685 int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3686 ssize_t len)
3688 handle_t *handle;
3689 ext4_lblk_t block;
3690 unsigned int max_blocks;
3691 int ret = 0;
3692 int ret2 = 0;
3693 struct buffer_head map_bh;
3694 unsigned int credits, blkbits = inode->i_blkbits;
3696 block = offset >> blkbits;
3698 * We can't just convert len to max_blocks, because the byte range
3699 * may straddle block boundaries: e.g. blocksize = 4096, offset = 3072, len = 2048
3701 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
3702 - block;
3704 * credits to insert 1 extent into extent tree
3706 credits = ext4_chunk_trans_blocks(inode, max_blocks);
3707 while (ret >= 0 && ret < max_blocks) {
3708 block = block + ret;
3709 max_blocks = max_blocks - ret;
3710 handle = ext4_journal_start(inode, credits);
3711 if (IS_ERR(handle)) {
3712 ret = PTR_ERR(handle);
3713 break;
3715 map_bh.b_state = 0;
3716 ret = ext4_get_blocks(handle, inode, block,
3717 max_blocks, &map_bh,
3718 EXT4_GET_BLOCKS_DIO_CONVERT_EXT);
3719 if (ret <= 0) {
3720 WARN_ON(ret <= 0);
3721 printk(KERN_ERR "%s: ext4_ext_get_blocks "
3722 "returned error inode#%lu, block=%u, "
3723 "max_blocks=%u", __func__,
3724 inode->i_ino, block, max_blocks);
3726 ext4_mark_inode_dirty(handle, inode);
3727 ret2 = ext4_journal_stop(handle);
3728 if (ret <= 0 || ret2)
3729 break;
3731 return ret > 0 ? ret2 : ret;
3734 * Callback function called for each extent to gather FIEMAP information.
3736 static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
3737 struct ext4_ext_cache *newex, struct ext4_extent *ex,
3738 void *data)
3740 struct fiemap_extent_info *fieinfo = data;
3741 unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
3742 __u64 logical;
3743 __u64 physical;
3744 __u64 length;
3745 __u32 flags = 0;
3746 int error;
3748 logical = (__u64)newex->ec_block << blksize_bits;
3750 if (newex->ec_type == EXT4_EXT_CACHE_GAP) {
3751 pgoff_t offset;
3752 struct page *page;
3753 struct buffer_head *bh = NULL;
3755 offset = logical >> PAGE_SHIFT;
3756 page = find_get_page(inode->i_mapping, offset);
3757 if (!page || !page_has_buffers(page))
3758 return EXT_CONTINUE;
3760 bh = page_buffers(page);
3762 if (!bh)
3763 return EXT_CONTINUE;
3765 if (buffer_delay(bh)) {
3766 flags |= FIEMAP_EXTENT_DELALLOC;
3767 page_cache_release(page);
3768 } else {
3769 page_cache_release(page);
3770 return EXT_CONTINUE;
3774 physical = (__u64)newex->ec_start << blksize_bits;
3775 length = (__u64)newex->ec_len << blksize_bits;
3777 if (ex && ext4_ext_is_uninitialized(ex))
3778 flags |= FIEMAP_EXTENT_UNWRITTEN;
3781 * If this extent reaches EXT_MAX_BLOCK, it must be last.
3783 * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
3784 * this also indicates no more allocated blocks.
3786 * XXX this might miss a single-block extent at EXT_MAX_BLOCK
3788 if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK ||
3789 newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) {
3790 loff_t size = i_size_read(inode);
3791 loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb);
3793 flags |= FIEMAP_EXTENT_LAST;
3794 if ((flags & FIEMAP_EXTENT_DELALLOC) &&
3795 logical+length > size)
3796 length = (size - logical + bs - 1) & ~(bs-1);
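/*
 * Illustrative note (not part of the original source): this rounds the
 * delalloc tail up to a block boundary, e.g. size = 10000,
 * logical = 8192, bs = 4096 gives (1808 + 4095) & ~4095 = 4096.
 */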
3799 error = fiemap_fill_next_extent(fieinfo, logical, physical,
3800 length, flags);
3801 if (error < 0)
3802 return error;
3803 if (error == 1)
3804 return EXT_BREAK;
3806 return EXT_CONTINUE;
3809 /* fiemap flags we can handle are specified here */
3810 #define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR)
3812 static int ext4_xattr_fiemap(struct inode *inode,
3813 struct fiemap_extent_info *fieinfo)
3815 __u64 physical = 0;
3816 __u64 length;
3817 __u32 flags = FIEMAP_EXTENT_LAST;
3818 int blockbits = inode->i_sb->s_blocksize_bits;
3819 int error = 0;
3821 /* in-inode? */
3822 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
3823 struct ext4_iloc iloc;
3824 int offset; /* offset of xattr in inode */
3826 error = ext4_get_inode_loc(inode, &iloc);
3827 if (error)
3828 return error;
3829 physical = iloc.bh->b_blocknr << blockbits;
3830 offset = EXT4_GOOD_OLD_INODE_SIZE +
3831 EXT4_I(inode)->i_extra_isize;
3832 physical += offset;
3833 length = EXT4_SB(inode->i_sb)->s_inode_size - offset;
3834 flags |= FIEMAP_EXTENT_DATA_INLINE;
3835 brelse(iloc.bh);
3836 } else { /* external block */
3837 physical = EXT4_I(inode)->i_file_acl << blockbits;
3838 length = inode->i_sb->s_blocksize;
3841 if (physical)
3842 error = fiemap_fill_next_extent(fieinfo, 0, physical,
3843 length, flags);
3844 return (error < 0 ? error : 0);
3847 int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3848 __u64 start, __u64 len)
3850 ext4_lblk_t start_blk;
3851 int error = 0;
3853 /* fallback to generic here if not in extents fmt */
3854 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
3855 return generic_block_fiemap(inode, fieinfo, start, len,
3856 ext4_get_block);
3858 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
3859 return -EBADR;
3861 if (fieinfo->fi_flags & FIEMAP_FLAG_XATTR) {
3862 error = ext4_xattr_fiemap(inode, fieinfo);
3863 } else {
3864 ext4_lblk_t len_blks;
3865 __u64 last_blk;
3867 start_blk = start >> inode->i_sb->s_blocksize_bits;
3868 last_blk = (start + len - 1) >> inode->i_sb->s_blocksize_bits;
3869 if (last_blk >= EXT_MAX_BLOCK)
3870 last_blk = EXT_MAX_BLOCK-1;
3871 len_blks = ((ext4_lblk_t) last_blk) - start_blk + 1;
3874 * Walk the extent tree gathering extent information.
3875 * ext4_ext_fiemap_cb will push extents back to user.
3877 error = ext4_ext_walk_space(inode, start_blk, len_blks,
3878 ext4_ext_fiemap_cb, fieinfo);
3881 return error;