1 ext4: refactor ext4_move_extents code base
3 From: Dmitry Monakhov <dmonakhov@openvz.org>
5 ext4_move_extents is too complex for review. It duplicates almost
6 every function available in the rest of the codebase. It also has a
7 useless artificial restriction orig_offset == donor_offset. But in
8 fact the logic of ext4_move_extents is very simple:
10 Iterate extents one by one (similar to ext4_fill_fiemap_extents)
11 ->Iterate each page covered extent (similar to generic_perform_write)
12 ->swap extents for covered by page (can be shared with IOC_MOVE_DATA)
14 Signed-off-by: Dmitry Monakhov <dmonakhov@openvz.org>
15 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
18 fs/ext4/extents.c | 234 +++++++++++++-
19 fs/ext4/move_extent.c | 990 +++++++----------------------------------------------------
20 3 files changed, 338 insertions(+), 891 deletions(-)
22 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
23 index 420c9be..cf3ad75 100644
26 @@ -2740,10 +2740,15 @@ extern int ext4_find_delalloc_range(struct inode *inode,
27 ext4_lblk_t lblk_start,
28 ext4_lblk_t lblk_end);
29 extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk);
30 +extern ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_ext_path *path);
31 extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
32 __u64 start, __u64 len);
33 extern int ext4_ext_precache(struct inode *inode);
34 extern int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
35 +extern int ext4_swap_extents(handle_t *handle, struct inode *inode1,
36 + struct inode *inode2, ext4_lblk_t lblk1,
37 + ext4_lblk_t lblk2, ext4_lblk_t count,
38 + int mark_unwritten,int *err);
41 extern void ext4_double_down_write_data_sem(struct inode *first,
42 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
43 index 1b76834..73d9ae9 100644
44 --- a/fs/ext4/extents.c
45 +++ b/fs/ext4/extents.c
46 @@ -291,6 +291,19 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
51 +ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
52 + struct ext4_ext_path *path, ext4_lblk_t lblk,
55 + int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
57 + return ext4_split_extent_at(handle, inode, path, lblk, unwritten ?
58 + EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
59 + EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
60 + (nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0));
64 * Calculate the number of metadata blocks needed
66 @@ -1559,7 +1572,7 @@ found_extent:
67 * allocated block. Thus, index entries have to be consistent
72 ext4_ext_next_allocated_block(struct ext4_ext_path *path)
75 @@ -2854,24 +2867,14 @@ again:
77 if (end >= ee_block &&
78 end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
81 - if (ext4_ext_is_unwritten(ex))
82 - split_flag = EXT4_EXT_MARK_UNWRIT1 |
83 - EXT4_EXT_MARK_UNWRIT2;
86 * Split the extent in two so that 'end' is the last
87 * block in the first new extent. Also we should not
88 * fail removing space due to ENOSPC so try to use
89 * reserved block if that happens.
91 - err = ext4_split_extent_at(handle, inode, path,
92 - end + 1, split_flag,
94 - EXT4_GET_BLOCKS_PRE_IO |
95 - EXT4_GET_BLOCKS_METADATA_NOFAIL);
97 + err = ext4_force_split_extent_at(handle, inode, path,
102 @@ -5506,3 +5509,208 @@ out_mutex:
103 mutex_unlock(&inode->i_mutex);
108 + * ext4_swap_extents - Swap extents between two inodes
110 + * @inode1: First inode
111 + * @inode2: Second inode
112 + * @lblk1: Start block for first inode
113 + * @lblk2: Start block for second inode
114 + * @count: Number of blocks to swap
115 + * @mark_unwritten: Mark second inode's extents as unwritten after swap
116 + * @erp: Pointer to save error value
118 + * This helper routine does exactly what it promises: swap extents. All other
119 + * stuff such as page-cache locking consistency, bh mapping consistency or
120 + * extent's data copying must be performed by caller.
122 + * i_mutex is held for both inodes
123 + * i_data_sem is locked for write for both inodes
125 + * All pages from requested range are locked for both inodes
128 +ext4_swap_extents(handle_t *handle, struct inode *inode1,
129 + struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
130 + ext4_lblk_t count, int unwritten, int *erp)
132 + struct ext4_ext_path *path1 = NULL;
133 + struct ext4_ext_path *path2 = NULL;
134 + int replaced_count = 0;
136 + BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
137 + BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
138 + BUG_ON(!mutex_is_locked(&inode1->i_mutex));
139 +	BUG_ON(!mutex_is_locked(&inode2->i_mutex));
141 + *erp = ext4_es_remove_extent(inode1, lblk1, count);
144 + *erp = ext4_es_remove_extent(inode2, lblk2, count);
149 + struct ext4_extent *ex1, *ex2, tmp_ex;
150 + ext4_lblk_t e1_blk, e2_blk;
151 + int e1_len, e2_len, len;
154 + path1 = ext4_ext_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
155 + if (IS_ERR(path1)) {
156 + *erp = PTR_ERR(path1);
159 + path2 = ext4_ext_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
160 + if (IS_ERR(path2)) {
161 + *erp = PTR_ERR(path2);
164 + ex1 = path1[path1->p_depth].p_ext;
165 + ex2 = path2[path2->p_depth].p_ext;
166 +		/* Do we have something to swap ? */
167 + if (unlikely(!ex2 || !ex1))
170 + e1_blk = le32_to_cpu(ex1->ee_block);
171 + e2_blk = le32_to_cpu(ex2->ee_block);
172 + e1_len = ext4_ext_get_actual_len(ex1);
173 + e2_len = ext4_ext_get_actual_len(ex2);
175 + /* Hole handling */
176 + if (!in_range(lblk1, e1_blk, e1_len) ||
177 + !in_range(lblk2, e2_blk, e2_len)) {
178 + ext4_lblk_t next1, next2;
180 + /* if hole after extent, then go to next extent */
181 + next1 = ext4_ext_next_allocated_block(path1);
182 + next2 = ext4_ext_next_allocated_block(path2);
183 + /* If hole before extent, then shift to that extent */
184 + if (e1_blk > lblk1)
186 + if (e2_blk > lblk2)
188 + /* Do we have something to swap */
189 + if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
191 +			/* Move to the rightmost boundary */
192 + len = next1 - lblk1;
193 + if (len < next2 - lblk2)
194 + len = next2 - lblk2;
203 + /* Prepare left boundary */
204 + if (e1_blk < lblk1) {
206 + *erp = ext4_force_split_extent_at(handle, inode1,
211 + if (e2_blk < lblk2) {
213 + *erp = ext4_force_split_extent_at(handle, inode2,
218 +		/* ext4_split_extent_at() may result in a leaf extent split,
219 +		 * path must be revalidated. */
223 + /* Prepare right boundary */
225 + if (len > e1_blk + e1_len - lblk1)
226 + len = e1_blk + e1_len - lblk1;
227 + if (len > e2_blk + e2_len - lblk2)
228 + len = e2_blk + e2_len - lblk2;
230 + if (len != e1_len) {
232 + *erp = ext4_force_split_extent_at(handle, inode1,
233 + path1, lblk1 + len, 0);
237 + if (len != e2_len) {
239 + *erp = ext4_force_split_extent_at(handle, inode2,
240 + path2, lblk2 + len, 0);
244 +		/* ext4_split_extent_at() may result in a leaf extent split,
245 +		 * path must be revalidated. */
249 + BUG_ON(e2_len != e1_len);
250 + *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
253 + *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
257 + /* Both extents are fully inside boundaries. Swap it now */
259 + ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
260 + ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
261 + ex1->ee_len = cpu_to_le16(e2_len);
262 + ex2->ee_len = cpu_to_le16(e1_len);
264 + ext4_ext_mark_unwritten(ex2);
265 + if (ext4_ext_is_unwritten(&tmp_ex))
266 + ext4_ext_mark_unwritten(ex1);
268 + ext4_ext_try_to_merge(handle, inode2, path2, ex2);
269 + ext4_ext_try_to_merge(handle, inode1, path1, ex1);
270 + *erp = ext4_ext_dirty(handle, inode2, path2 +
274 + *erp = ext4_ext_dirty(handle, inode1, path1 +
277 +		 * Looks scary, huh? The second inode already points to new blocks,
278 + * and it was successfully dirtied. But luckily error may happen
279 + * only due to journal error, so full transaction will be
286 + replaced_count += len;
291 + ext4_ext_drop_refs(path1);
296 + ext4_ext_drop_refs(path2);
302 + ext4_ext_drop_refs(path1);
306 + ext4_ext_drop_refs(path2);
309 + return replaced_count;
311 diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
312 index 123a51b..c8f895b 100644
313 --- a/fs/ext4/move_extent.c
314 +++ b/fs/ext4/move_extent.c
315 @@ -49,101 +49,6 @@ get_ext_path(struct inode *inode, ext4_lblk_t lblock,
319 - * copy_extent_status - Copy the extent's initialization status
321 - * @src: an extent for getting initialize status
322 - * @dest: an extent to be set the status
325 -copy_extent_status(struct ext4_extent *src, struct ext4_extent *dest)
327 - if (ext4_ext_is_unwritten(src))
328 - ext4_ext_mark_unwritten(dest);
330 - dest->ee_len = cpu_to_le16(ext4_ext_get_actual_len(dest));
334 - * mext_next_extent - Search for the next extent and set it to "extent"
336 - * @inode: inode which is searched
337 - * @path: this will obtain data for the next extent
338 - * @extent: pointer to the next extent we have just gotten
340 - * Search the next extent in the array of ext4_ext_path structure (@path)
341 - * and set it to ext4_extent structure (@extent). In addition, the member of
342 - * @path (->p_ext) also points the next extent. Return 0 on success, 1 if
343 - * ext4_ext_path structure refers to the last extent, or a negative error
344 - * value on failure.
347 -mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
348 - struct ext4_extent **extent)
350 - struct ext4_extent_header *eh;
351 - int ppos, leaf_ppos = path->p_depth;
354 - if (EXT_LAST_EXTENT(path[ppos].p_hdr) > path[ppos].p_ext) {
356 - *extent = ++path[ppos].p_ext;
357 - path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
361 - while (--ppos >= 0) {
362 - if (EXT_LAST_INDEX(path[ppos].p_hdr) >
363 - path[ppos].p_idx) {
364 - int cur_ppos = ppos;
367 - path[ppos].p_idx++;
368 - path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
369 - if (path[ppos+1].p_bh)
370 - brelse(path[ppos+1].p_bh);
371 - path[ppos+1].p_bh =
372 - sb_bread(inode->i_sb, path[ppos].p_block);
373 - if (!path[ppos+1].p_bh)
375 - path[ppos+1].p_hdr =
376 - ext_block_hdr(path[ppos+1].p_bh);
378 - /* Halfway index block */
379 - while (++cur_ppos < leaf_ppos) {
380 - path[cur_ppos].p_idx =
381 - EXT_FIRST_INDEX(path[cur_ppos].p_hdr);
382 - path[cur_ppos].p_block =
383 - ext4_idx_pblock(path[cur_ppos].p_idx);
384 - if (path[cur_ppos+1].p_bh)
385 - brelse(path[cur_ppos+1].p_bh);
386 - path[cur_ppos+1].p_bh = sb_bread(inode->i_sb,
387 - path[cur_ppos].p_block);
388 - if (!path[cur_ppos+1].p_bh)
390 - path[cur_ppos+1].p_hdr =
391 - ext_block_hdr(path[cur_ppos+1].p_bh);
394 - path[leaf_ppos].p_ext = *extent = NULL;
396 - eh = path[leaf_ppos].p_hdr;
397 - if (le16_to_cpu(eh->eh_entries) == 0)
398 - /* empty leaf is found */
402 - path[leaf_ppos].p_ext = *extent =
403 - EXT_FIRST_EXTENT(path[leaf_ppos].p_hdr);
404 - path[leaf_ppos].p_block =
405 - ext4_ext_pblock(path[leaf_ppos].p_ext);
409 - /* We found the last extent */
414 * ext4_double_down_write_data_sem - Acquire two inodes' write lock
417 @@ -178,417 +83,6 @@ ext4_double_up_write_data_sem(struct inode *orig_inode,
421 - * mext_insert_across_blocks - Insert extents across leaf block
423 - * @handle: journal handle
424 - * @orig_inode: original inode
425 - * @o_start: first original extent to be changed
426 - * @o_end: last original extent to be changed
427 - * @start_ext: first new extent to be inserted
428 - * @new_ext: middle of new extent to be inserted
429 - * @end_ext: last new extent to be inserted
431 - * Allocate a new leaf block and insert extents into it. Return 0 on success,
432 - * or a negative error value on failure.
435 -mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
436 - struct ext4_extent *o_start, struct ext4_extent *o_end,
437 - struct ext4_extent *start_ext, struct ext4_extent *new_ext,
438 - struct ext4_extent *end_ext)
440 - struct ext4_ext_path *orig_path = NULL;
441 - ext4_lblk_t eblock = 0;
446 - if (start_ext->ee_len && new_ext->ee_len && end_ext->ee_len) {
447 - if (o_start == o_end) {
449 - /* start_ext new_ext end_ext
450 - * donor |---------|-----------|--------|
451 - * orig |------------------------------|
456 - /* start_ext new_ext end_ext
457 - * donor |---------|----------|---------|
458 - * orig |---------------|--------------|
460 - o_end->ee_block = end_ext->ee_block;
461 - o_end->ee_len = end_ext->ee_len;
462 - ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext));
465 - o_start->ee_len = start_ext->ee_len;
466 - eblock = le32_to_cpu(start_ext->ee_block);
469 - } else if (start_ext->ee_len && new_ext->ee_len &&
470 - !end_ext->ee_len && o_start == o_end) {
472 - /* start_ext new_ext
473 - * donor |--------------|---------------|
474 - * orig |------------------------------|
476 - o_start->ee_len = start_ext->ee_len;
477 - eblock = le32_to_cpu(start_ext->ee_block);
480 - } else if (!start_ext->ee_len && new_ext->ee_len &&
481 - end_ext->ee_len && o_start == o_end) {
484 - * donor |--------------|---------------|
485 - * orig |------------------------------|
487 - o_end->ee_block = end_ext->ee_block;
488 - o_end->ee_len = end_ext->ee_len;
489 - ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext));
492 - * Set 0 to the extent block if new_ext was
495 - if (new_ext->ee_block)
496 - eblock = le32_to_cpu(new_ext->ee_block);
500 - ext4_debug("ext4 move extent: Unexpected insert case\n");
505 - err = get_ext_path(orig_inode, eblock, &orig_path);
509 - if (ext4_ext_insert_extent(handle, orig_inode,
510 - orig_path, new_ext, 0))
515 - err = get_ext_path(orig_inode,
516 - le32_to_cpu(end_ext->ee_block) - 1, &orig_path);
520 - if (ext4_ext_insert_extent(handle, orig_inode,
521 - orig_path, end_ext, 0))
526 - ext4_ext_drop_refs(orig_path);
535 - * mext_insert_inside_block - Insert new extent to the extent block
537 - * @o_start: first original extent to be moved
538 - * @o_end: last original extent to be moved
539 - * @start_ext: first new extent to be inserted
540 - * @new_ext: middle of new extent to be inserted
541 - * @end_ext: last new extent to be inserted
542 - * @eh: extent header of target leaf block
543 - * @range_to_move: used to decide how to insert extent
545 - * Insert extents into the leaf block. The extent (@o_start) is overwritten
546 - * by inserted extents.
549 -mext_insert_inside_block(struct ext4_extent *o_start,
550 - struct ext4_extent *o_end,
551 - struct ext4_extent *start_ext,
552 - struct ext4_extent *new_ext,
553 - struct ext4_extent *end_ext,
554 - struct ext4_extent_header *eh,
560 - /* Move the existing extents */
561 - if (range_to_move && o_end < EXT_LAST_EXTENT(eh)) {
562 - len = (unsigned long)(EXT_LAST_EXTENT(eh) + 1) -
563 - (unsigned long)(o_end + 1);
564 - memmove(o_end + 1 + range_to_move, o_end + 1, len);
567 - /* Insert start entry */
568 - if (start_ext->ee_len)
569 - o_start[i++].ee_len = start_ext->ee_len;
571 - /* Insert new entry */
572 - if (new_ext->ee_len) {
573 - o_start[i] = *new_ext;
574 - ext4_ext_store_pblock(&o_start[i++], ext4_ext_pblock(new_ext));
577 - /* Insert end entry */
578 - if (end_ext->ee_len)
579 - o_start[i] = *end_ext;
581 - /* Increment the total entries counter on the extent block */
582 - le16_add_cpu(&eh->eh_entries, range_to_move);
586 - * mext_insert_extents - Insert new extent
588 - * @handle: journal handle
589 - * @orig_inode: original inode
590 - * @orig_path: path indicates first extent to be changed
591 - * @o_start: first original extent to be changed
592 - * @o_end: last original extent to be changed
593 - * @start_ext: first new extent to be inserted
594 - * @new_ext: middle of new extent to be inserted
595 - * @end_ext: last new extent to be inserted
597 - * Call the function to insert extents. If we cannot add more extents into
598 - * the leaf block, we call mext_insert_across_blocks() to create a
599 - * new leaf block. Otherwise call mext_insert_inside_block(). Return 0
600 - * on success, or a negative error value on failure.
603 -mext_insert_extents(handle_t *handle, struct inode *orig_inode,
604 - struct ext4_ext_path *orig_path,
605 - struct ext4_extent *o_start,
606 - struct ext4_extent *o_end,
607 - struct ext4_extent *start_ext,
608 - struct ext4_extent *new_ext,
609 - struct ext4_extent *end_ext)
611 - struct ext4_extent_header *eh;
612 - unsigned long need_slots, slots_range;
613 - int range_to_move, depth, ret;
616 - * The extents need to be inserted
617 - * start_extent + new_extent + end_extent.
619 - need_slots = (start_ext->ee_len ? 1 : 0) + (end_ext->ee_len ? 1 : 0) +
620 - (new_ext->ee_len ? 1 : 0);
622 - /* The number of slots between start and end */
623 - slots_range = ((unsigned long)(o_end + 1) - (unsigned long)o_start + 1)
624 - / sizeof(struct ext4_extent);
626 - /* Range to move the end of extent */
627 - range_to_move = need_slots - slots_range;
628 - depth = orig_path->p_depth;
629 - orig_path += depth;
630 - eh = orig_path->p_hdr;
633 - /* Register to journal */
634 - BUFFER_TRACE(orig_path->p_bh, "get_write_access");
635 - ret = ext4_journal_get_write_access(handle, orig_path->p_bh);
641 - if (range_to_move > 0 &&
642 - (range_to_move > le16_to_cpu(eh->eh_max)
643 - - le16_to_cpu(eh->eh_entries))) {
645 - ret = mext_insert_across_blocks(handle, orig_inode, o_start,
646 - o_end, start_ext, new_ext, end_ext);
650 - mext_insert_inside_block(o_start, o_end, start_ext, new_ext,
651 - end_ext, eh, range_to_move);
653 - return ext4_ext_dirty(handle, orig_inode, orig_path);
657 - * mext_leaf_block - Move one leaf extent block into the inode.
659 - * @handle: journal handle
660 - * @orig_inode: original inode
661 - * @orig_path: path indicates first extent to be changed
662 - * @dext: donor extent
663 - * @from: start offset on the target file
665 - * In order to insert extents into the leaf block, we must divide the extent
666 - * in the leaf block into three extents. The one is located to be inserted
667 - * extents, and the others are located around it.
669 - * Therefore, this function creates structures to save extents of the leaf
670 - * block, and inserts extents by calling mext_insert_extents() with
671 - * created extents. Return 0 on success, or a negative error value on failure.
674 -mext_leaf_block(handle_t *handle, struct inode *orig_inode,
675 - struct ext4_ext_path *orig_path, struct ext4_extent *dext,
678 - struct ext4_extent *oext, *o_start, *o_end, *prev_ext;
679 - struct ext4_extent new_ext, start_ext, end_ext;
680 - ext4_lblk_t new_ext_end;
681 - int oext_alen, new_ext_alen, end_ext_alen;
682 - int depth = ext_depth(orig_inode);
685 - start_ext.ee_block = end_ext.ee_block = 0;
686 - o_start = o_end = oext = orig_path[depth].p_ext;
687 - oext_alen = ext4_ext_get_actual_len(oext);
688 - start_ext.ee_len = end_ext.ee_len = 0;
690 - new_ext.ee_block = cpu_to_le32(*from);
691 - ext4_ext_store_pblock(&new_ext, ext4_ext_pblock(dext));
692 - new_ext.ee_len = dext->ee_len;
693 - new_ext_alen = ext4_ext_get_actual_len(&new_ext);
694 - new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1;
697 - * Case: original extent is first
702 - if (le32_to_cpu(oext->ee_block) < le32_to_cpu(new_ext.ee_block) &&
703 - le32_to_cpu(new_ext.ee_block) <
704 - le32_to_cpu(oext->ee_block) + oext_alen) {
705 - start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) -
706 - le32_to_cpu(oext->ee_block));
707 - start_ext.ee_block = oext->ee_block;
708 - copy_extent_status(oext, &start_ext);
709 - } else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) {
710 - prev_ext = oext - 1;
712 - * We can merge new_ext into previous extent,
713 - * if these are contiguous and same extent type.
715 - if (ext4_can_extents_be_merged(orig_inode, prev_ext,
717 - o_start = prev_ext;
718 - start_ext.ee_len = cpu_to_le16(
719 - ext4_ext_get_actual_len(prev_ext) +
721 - start_ext.ee_block = oext->ee_block;
722 - copy_extent_status(prev_ext, &start_ext);
723 - new_ext.ee_len = 0;
728 - * Case: new_ext_end must be less than oext
729 - * oext |-----------|
730 - * new_ext |-------|
732 - if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) {
733 - EXT4_ERROR_INODE(orig_inode,
734 - "new_ext_end(%u) should be less than or equal to "
735 - "oext->ee_block(%u) + oext_alen(%d) - 1",
736 - new_ext_end, le32_to_cpu(oext->ee_block),
743 - * Case: new_ext is smaller than original extent
744 - * oext |---------------|
745 - * new_ext |-----------|
748 - if (le32_to_cpu(oext->ee_block) <= new_ext_end &&
749 - new_ext_end < le32_to_cpu(oext->ee_block) + oext_alen - 1) {
751 - cpu_to_le16(le32_to_cpu(oext->ee_block) +
752 - oext_alen - 1 - new_ext_end);
753 - copy_extent_status(oext, &end_ext);
754 - end_ext_alen = ext4_ext_get_actual_len(&end_ext);
755 - ext4_ext_store_pblock(&end_ext,
756 - (ext4_ext_pblock(o_end) + oext_alen - end_ext_alen));
758 - cpu_to_le32(le32_to_cpu(o_end->ee_block) +
759 - oext_alen - end_ext_alen);
762 - ret = mext_insert_extents(handle, orig_inode, orig_path, o_start,
763 - o_end, &start_ext, &new_ext, &end_ext);
769 - * mext_calc_swap_extents - Calculate extents for extent swapping.
771 - * @tmp_dext: the extent that will belong to the original inode
772 - * @tmp_oext: the extent that will belong to the donor inode
773 - * @orig_off: block offset of original inode
774 - * @donor_off: block offset of donor inode
775 - * @max_count: the maximum length of extents
777 - * Return 0 on success, or a negative error value on failure.
780 -mext_calc_swap_extents(struct ext4_extent *tmp_dext,
781 - struct ext4_extent *tmp_oext,
782 - ext4_lblk_t orig_off, ext4_lblk_t donor_off,
783 - ext4_lblk_t max_count)
785 - ext4_lblk_t diff, orig_diff;
786 - struct ext4_extent dext_old, oext_old;
788 - BUG_ON(orig_off != donor_off);
790 - /* original and donor extents have to cover the same block offset */
791 - if (orig_off < le32_to_cpu(tmp_oext->ee_block) ||
792 - le32_to_cpu(tmp_oext->ee_block) +
793 - ext4_ext_get_actual_len(tmp_oext) - 1 < orig_off)
796 - if (orig_off < le32_to_cpu(tmp_dext->ee_block) ||
797 - le32_to_cpu(tmp_dext->ee_block) +
798 - ext4_ext_get_actual_len(tmp_dext) - 1 < orig_off)
801 - dext_old = *tmp_dext;
802 - oext_old = *tmp_oext;
804 - /* When tmp_dext is too large, pick up the target range. */
805 - diff = donor_off - le32_to_cpu(tmp_dext->ee_block);
807 - ext4_ext_store_pblock(tmp_dext, ext4_ext_pblock(tmp_dext) + diff);
808 - le32_add_cpu(&tmp_dext->ee_block, diff);
809 - le16_add_cpu(&tmp_dext->ee_len, -diff);
811 - if (max_count < ext4_ext_get_actual_len(tmp_dext))
812 - tmp_dext->ee_len = cpu_to_le16(max_count);
814 - orig_diff = orig_off - le32_to_cpu(tmp_oext->ee_block);
815 - ext4_ext_store_pblock(tmp_oext, ext4_ext_pblock(tmp_oext) + orig_diff);
817 - /* Adjust extent length if donor extent is larger than orig */
818 - if (ext4_ext_get_actual_len(tmp_dext) >
819 - ext4_ext_get_actual_len(tmp_oext) - orig_diff)
820 - tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_oext->ee_len) -
823 - tmp_oext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(tmp_dext));
825 - copy_extent_status(&oext_old, tmp_dext);
826 - copy_extent_status(&dext_old, tmp_oext);
832 * mext_check_coverage - Check that all extents in range has the same type
834 * @inode: inode in question
835 @@ -647,129 +141,6 @@ out:
837 * Return replaced block count.
840 -mext_replace_branches(handle_t *handle, struct inode *orig_inode,
841 - struct inode *donor_inode, ext4_lblk_t from,
842 - ext4_lblk_t count, int *err)
844 - struct ext4_ext_path *orig_path = NULL;
845 - struct ext4_ext_path *donor_path = NULL;
846 - struct ext4_extent *oext, *dext;
847 - struct ext4_extent tmp_dext, tmp_oext;
848 - ext4_lblk_t orig_off = from, donor_off = from;
850 - int replaced_count = 0;
853 - *err = ext4_es_remove_extent(orig_inode, from, count);
857 - *err = ext4_es_remove_extent(donor_inode, from, count);
861 - /* Get the original extent for the block "orig_off" */
862 - *err = get_ext_path(orig_inode, orig_off, &orig_path);
866 - /* Get the donor extent for the head */
867 - *err = get_ext_path(donor_inode, donor_off, &donor_path);
870 - depth = ext_depth(orig_inode);
871 - oext = orig_path[depth].p_ext;
874 - depth = ext_depth(donor_inode);
875 - dext = donor_path[depth].p_ext;
876 - if (unlikely(!dext))
877 - goto missing_donor_extent;
880 - *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
885 - /* Loop for the donor extents */
887 - /* The extent for donor must be found. */
888 - if (unlikely(!dext)) {
889 - missing_donor_extent:
890 - EXT4_ERROR_INODE(donor_inode,
891 - "The extent for donor must be found");
894 - } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) {
895 - EXT4_ERROR_INODE(donor_inode,
896 - "Donor offset(%u) and the first block of donor "
897 - "extent(%u) should be equal",
899 - le32_to_cpu(tmp_dext.ee_block));
904 - /* Set donor extent to orig extent */
905 - *err = mext_leaf_block(handle, orig_inode,
906 - orig_path, &tmp_dext, &orig_off);
910 - /* Set orig extent to donor extent */
911 - *err = mext_leaf_block(handle, donor_inode,
912 - donor_path, &tmp_oext, &donor_off);
916 - dext_alen = ext4_ext_get_actual_len(&tmp_dext);
917 - replaced_count += dext_alen;
918 - donor_off += dext_alen;
919 - orig_off += dext_alen;
921 - BUG_ON(replaced_count > count);
922 - /* Already moved the expected blocks */
923 - if (replaced_count >= count)
927 - ext4_ext_drop_refs(orig_path);
928 - *err = get_ext_path(orig_inode, orig_off, &orig_path);
931 - depth = ext_depth(orig_inode);
932 - oext = orig_path[depth].p_ext;
936 - ext4_ext_drop_refs(donor_path);
937 - *err = get_ext_path(donor_inode, donor_off, &donor_path);
940 - depth = ext_depth(donor_inode);
941 - dext = donor_path[depth].p_ext;
944 - *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
945 - donor_off, count - replaced_count);
952 - ext4_ext_drop_refs(orig_path);
956 - ext4_ext_drop_refs(donor_path);
960 - return replaced_count;
964 * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2
965 @@ -783,7 +154,7 @@ out:
968 mext_page_double_lock(struct inode *inode1, struct inode *inode2,
969 - pgoff_t index, struct page *page[2])
970 + pgoff_t index1, pgoff_t index2, struct page *page[2])
972 struct address_space *mapping[2];
973 unsigned fl = AOP_FLAG_NOFS;
974 @@ -793,15 +164,18 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
975 mapping[0] = inode1->i_mapping;
976 mapping[1] = inode2->i_mapping;
978 + pgoff_t tmp = index1;
981 mapping[0] = inode2->i_mapping;
982 mapping[1] = inode1->i_mapping;
985 - page[0] = grab_cache_page_write_begin(mapping[0], index, fl);
986 + page[0] = grab_cache_page_write_begin(mapping[0], index1, fl);
990 - page[1] = grab_cache_page_write_begin(mapping[1], index, fl);
991 + page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
993 unlock_page(page[0]);
994 page_cache_release(page[0]);
995 @@ -905,13 +279,14 @@ out:
998 move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
999 - pgoff_t orig_page_offset, int data_offset_in_page,
1000 - int block_len_in_page, int unwritten, int *err)
1001 + pgoff_t orig_page_offset, pgoff_t donor_page_offset,
1002 + int data_offset_in_page,
1003 + int block_len_in_page, int unwritten, int *err)
1005 struct inode *orig_inode = file_inode(o_filp);
1006 struct page *pagep[2] = {NULL, NULL};
1008 - ext4_lblk_t orig_blk_offset;
1009 + ext4_lblk_t orig_blk_offset, donor_blk_offset;
1010 unsigned long blocksize = orig_inode->i_sb->s_blocksize;
1011 unsigned int w_flags = 0;
1012 unsigned int tmp_data_size, data_size, replaced_size;
1013 @@ -939,6 +314,9 @@ again:
1014 orig_blk_offset = orig_page_offset * blocks_per_page +
1015 data_offset_in_page;
1017 + donor_blk_offset = donor_page_offset * blocks_per_page +
1018 + data_offset_in_page;
1020 /* Calculate data_size */
1021 if ((orig_blk_offset + block_len_in_page - 1) ==
1022 ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) {
1023 @@ -959,7 +337,7 @@ again:
1024 replaced_size = data_size;
1026 *err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
1028 + donor_page_offset, pagep);
1029 if (unlikely(*err < 0))
1032 @@ -978,7 +356,7 @@ again:
1036 - unwritten &= mext_check_coverage(donor_inode, orig_blk_offset,
1037 + unwritten &= mext_check_coverage(donor_inode, donor_blk_offset,
1038 block_len_in_page, 1, err);
1041 @@ -994,9 +372,10 @@ again:
1045 - replaced_count = mext_replace_branches(handle, orig_inode,
1046 - donor_inode, orig_blk_offset,
1047 - block_len_in_page, err);
1048 + replaced_count = ext4_swap_extents(handle, orig_inode,
1049 + donor_inode, orig_blk_offset,
1051 + block_len_in_page, 1, err);
1053 ext4_double_up_write_data_sem(orig_inode, donor_inode);
1055 @@ -1014,9 +393,9 @@ data_copy:
1058 ext4_double_down_write_data_sem(orig_inode, donor_inode);
1059 - replaced_count = mext_replace_branches(handle, orig_inode, donor_inode,
1061 - block_len_in_page, err);
1062 + replaced_count = ext4_swap_extents(handle, orig_inode, donor_inode,
1063 + orig_blk_offset, donor_blk_offset,
1064 + block_len_in_page, 1, err);
1065 ext4_double_up_write_data_sem(orig_inode, donor_inode);
1067 if (replaced_count) {
1068 @@ -1061,9 +440,9 @@ repair_branches:
1069 * Try to swap extents to it's original places
1071 ext4_double_down_write_data_sem(orig_inode, donor_inode);
1072 - replaced_count = mext_replace_branches(handle, donor_inode, orig_inode,
1074 - block_len_in_page, &err2);
1075 + replaced_count = ext4_swap_extents(handle, donor_inode, orig_inode,
1076 + orig_blk_offset, donor_blk_offset,
1077 + block_len_in_page, 0, &err2);
1078 ext4_double_up_write_data_sem(orig_inode, donor_inode);
1079 if (replaced_count != block_len_in_page) {
1080 EXT4_ERROR_INODE_BLOCK(orig_inode, (sector_t)(orig_blk_offset),
1081 @@ -1093,10 +472,14 @@ mext_check_arguments(struct inode *orig_inode,
1082 struct inode *donor_inode, __u64 orig_start,
1083 __u64 donor_start, __u64 *len)
1085 - ext4_lblk_t orig_blocks, donor_blocks;
1086 + __u64 orig_eof, donor_eof;
1087 unsigned int blkbits = orig_inode->i_blkbits;
1088 unsigned int blocksize = 1 << blkbits;
1090 + orig_eof = (i_size_read(orig_inode) + blocksize - 1) >> blkbits;
1091 + donor_eof = (i_size_read(donor_inode) + blocksize - 1) >> blkbits;
1094 if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
1095 ext4_debug("ext4 move extent: suid or sgid is set"
1096 " to donor file [ino:orig %lu, donor %lu]\n",
1097 @@ -1112,7 +495,7 @@ mext_check_arguments(struct inode *orig_inode,
1098 ext4_debug("ext4 move extent: The argument files should "
1099 "not be swapfile [ino:orig %lu, donor %lu]\n",
1100 orig_inode->i_ino, donor_inode->i_ino);
1105 /* Ext4 move extent supports only extent based file */
1106 @@ -1132,67 +515,28 @@ mext_check_arguments(struct inode *orig_inode,
1109 /* Start offset should be same */
1110 - if (orig_start != donor_start) {
1111 + if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) !=
1112 + (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) {
1113 ext4_debug("ext4 move extent: orig and donor's start "
1114 - "offset are not same [ino:orig %lu, donor %lu]\n",
1115 +			"offset are not aligned [ino:orig %lu, donor %lu]\n",
1116 orig_inode->i_ino, donor_inode->i_ino);
1120 if ((orig_start >= EXT_MAX_BLOCKS) ||
1121 + (donor_start >= EXT_MAX_BLOCKS) ||
1122 (*len > EXT_MAX_BLOCKS) ||
1123 + (donor_start + *len >= EXT_MAX_BLOCKS) ||
1124 (orig_start + *len >= EXT_MAX_BLOCKS)) {
1125 ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
1126 "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
1127 orig_inode->i_ino, donor_inode->i_ino);
1131 - if (orig_inode->i_size > donor_inode->i_size) {
1132 - donor_blocks = (donor_inode->i_size + blocksize - 1) >> blkbits;
1133 - /* TODO: eliminate this artificial restriction */
1134 - if (orig_start >= donor_blocks) {
1135 - ext4_debug("ext4 move extent: orig start offset "
1136 - "[%llu] should be less than donor file blocks "
1137 - "[%u] [ino:orig %lu, donor %lu]\n",
1138 - orig_start, donor_blocks,
1139 - orig_inode->i_ino, donor_inode->i_ino);
1143 - /* TODO: eliminate this artificial restriction */
1144 - if (orig_start + *len > donor_blocks) {
1145 - ext4_debug("ext4 move extent: End offset [%llu] should "
1146 - "be less than donor file blocks [%u]."
1147 - "So adjust length from %llu to %llu "
1148 - "[ino:orig %lu, donor %lu]\n",
1149 - orig_start + *len, donor_blocks,
1150 - *len, donor_blocks - orig_start,
1151 - orig_inode->i_ino, donor_inode->i_ino);
1152 - *len = donor_blocks - orig_start;
1155 - orig_blocks = (orig_inode->i_size + blocksize - 1) >> blkbits;
1156 - if (orig_start >= orig_blocks) {
1157 - ext4_debug("ext4 move extent: start offset [%llu] "
1158 - "should be less than original file blocks "
1159 - "[%u] [ino:orig %lu, donor %lu]\n",
1160 - orig_start, orig_blocks,
1161 - orig_inode->i_ino, donor_inode->i_ino);
1165 - if (orig_start + *len > orig_blocks) {
1166 - ext4_debug("ext4 move extent: Adjust length "
1167 - "from %llu to %llu. Because it should be "
1168 - "less than original file blocks "
1169 - "[ino:orig %lu, donor %lu]\n",
1170 - *len, orig_blocks - orig_start,
1171 - orig_inode->i_ino, donor_inode->i_ino);
1172 - *len = orig_blocks - orig_start;
1176 + if (orig_eof < orig_start + *len - 1)
1177 + *len = orig_eof - orig_start;
1178 + if (donor_eof < donor_start + *len - 1)
1179 + *len = donor_eof - donor_start;
1181 ext4_debug("ext4 move extent: len should not be 0 "
1182 "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino,
1183 @@ -1245,23 +589,16 @@ mext_check_arguments(struct inode *orig_inode,
1184 * 7:Return 0 on success, or a negative error value on failure.
1187 -ext4_move_extents(struct file *o_filp, struct file *d_filp,
1188 - __u64 orig_start, __u64 donor_start, __u64 len,
1190 +ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
1191 + __u64 donor_blk, __u64 len, __u64 *moved_len)
1193 struct inode *orig_inode = file_inode(o_filp);
1194 struct inode *donor_inode = file_inode(d_filp);
1195 - struct ext4_ext_path *orig_path = NULL, *holecheck_path = NULL;
1196 - struct ext4_extent *ext_prev, *ext_cur, *ext_dummy;
1197 - ext4_lblk_t block_start = orig_start;
1198 - ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0;
1199 - ext4_lblk_t rest_blocks;
1200 - pgoff_t orig_page_offset = 0, seq_end_page;
1201 - int ret, depth, last_extent = 0;
1202 + struct ext4_ext_path *path = NULL;
1203 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
1204 - int data_offset_in_page;
1205 - int block_len_in_page;
1207 + ext4_lblk_t o_end, o_start = orig_blk;
1208 + ext4_lblk_t d_start = donor_blk;
1211 if (orig_inode->i_sb != donor_inode->i_sb) {
1212 ext4_debug("ext4 move extent: The argument files "
1213 @@ -1303,121 +640,58 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
1214 /* Protect extent tree against block allocations via delalloc */
1215 ext4_double_down_write_data_sem(orig_inode, donor_inode);
1216 /* Check the filesystem environment whether move_extent can be done */
1217 - ret = mext_check_arguments(orig_inode, donor_inode, orig_start,
1218 - donor_start, &len);
1219 + ret = mext_check_arguments(orig_inode, donor_inode, orig_blk,
1223 + o_end = o_start + len;
1225 - file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits;
1226 - block_end = block_start + len - 1;
1227 - if (file_end < block_end)
1228 - len -= block_end - file_end;
1230 - ret = get_ext_path(orig_inode, block_start, &orig_path);
1234 - /* Get path structure to check the hole */
1235 - ret = get_ext_path(orig_inode, block_start, &holecheck_path);
1238 + while (o_start < o_end) {
1239 + struct ext4_extent *ex;
1240 + ext4_lblk_t cur_blk, next_blk;
1241 + pgoff_t orig_page_index, donor_page_index;
1242 + int offset_in_page;
1243 + int unwritten, cur_len;
1245 - depth = ext_depth(orig_inode);
1246 - ext_cur = holecheck_path[depth].p_ext;
1249 - * Get proper starting location of block replacement if block_start was
1250 - * within the hole.
1252 - if (le32_to_cpu(ext_cur->ee_block) +
1253 - ext4_ext_get_actual_len(ext_cur) - 1 < block_start) {
1255 - * The hole exists between extents or the tail of
1258 - last_extent = mext_next_extent(orig_inode,
1259 - holecheck_path, &ext_cur);
1260 - if (last_extent < 0) {
1261 - ret = last_extent;
1264 - last_extent = mext_next_extent(orig_inode, orig_path,
1266 - if (last_extent < 0) {
1267 - ret = last_extent;
1268 + ret = get_ext_path(orig_inode, o_start, &path);
1271 + ex = path[path->p_depth].p_ext;
1272 + next_blk = ext4_ext_next_allocated_block(path);
1273 + cur_blk = le32_to_cpu(ex->ee_block);
1274 + cur_len = ext4_ext_get_actual_len(ex);
1275 + /* Check hole before the start pos */
1276 + if (cur_blk + cur_len - 1 < o_start) {
1277 + if (next_blk == EXT_MAX_BLOCKS) {
1282 + d_start += next_blk - o_start;
1283 + o_start = next_blk;
1285 + /* Check hole after the start pos */
1286 + } else if (cur_blk > o_start) {
1288 + d_start += cur_blk - o_start;
1289 + o_start = cur_blk;
1290 +			/* Extent inside requested range? */
1291 + if (cur_blk >= o_end)
1293 + } else { /* in_range(o_start, o_blk, o_len) */
1294 + cur_len += cur_blk - o_start;
1296 - seq_start = le32_to_cpu(ext_cur->ee_block);
1297 - } else if (le32_to_cpu(ext_cur->ee_block) > block_start)
1298 - /* The hole exists at the beginning of original file. */
1299 - seq_start = le32_to_cpu(ext_cur->ee_block);
1301 - seq_start = block_start;
1303 - /* No blocks within the specified range. */
1304 - if (le32_to_cpu(ext_cur->ee_block) > block_end) {
1305 - ext4_debug("ext4 move extent: The specified range of file "
1306 - "may be the hole\n");
1311 - /* Adjust start blocks */
1312 - add_blocks = min(le32_to_cpu(ext_cur->ee_block) +
1313 - ext4_ext_get_actual_len(ext_cur), block_end + 1) -
1314 - max(le32_to_cpu(ext_cur->ee_block), block_start);
1316 - while (!last_extent && le32_to_cpu(ext_cur->ee_block) <= block_end) {
1317 - seq_blocks += add_blocks;
1319 - /* Adjust tail blocks */
1320 - if (seq_start + seq_blocks - 1 > block_end)
1321 - seq_blocks = block_end - seq_start + 1;
1323 - ext_prev = ext_cur;
1324 - last_extent = mext_next_extent(orig_inode, holecheck_path,
1326 - if (last_extent < 0) {
1327 - ret = last_extent;
1330 - add_blocks = ext4_ext_get_actual_len(ext_cur);
1333 - * Extend the length of contiguous block (seq_blocks)
1334 - * if extents are contiguous.
1336 - if (ext4_can_extents_be_merged(orig_inode,
1337 - ext_prev, ext_cur) &&
1338 - block_end >= le32_to_cpu(ext_cur->ee_block) &&
1342 - /* Is original extent is unwritten */
1343 - unwritten = ext4_ext_is_unwritten(ext_prev);
1345 - data_offset_in_page = seq_start % blocks_per_page;
1348 - * Calculate data blocks count that should be swapped
1349 - * at the first page.
1351 - if (data_offset_in_page + seq_blocks > blocks_per_page) {
1352 - /* Swapped blocks are across pages */
1353 - block_len_in_page =
1354 - blocks_per_page - data_offset_in_page;
1356 - /* Swapped blocks are in a page */
1357 - block_len_in_page = seq_blocks;
1360 - orig_page_offset = seq_start >>
1361 - (PAGE_CACHE_SHIFT - orig_inode->i_blkbits);
1362 - seq_end_page = (seq_start + seq_blocks - 1) >>
1363 - (PAGE_CACHE_SHIFT - orig_inode->i_blkbits);
1364 - seq_start = le32_to_cpu(ext_cur->ee_block);
1365 - rest_blocks = seq_blocks;
1367 + unwritten = ext4_ext_is_unwritten(ex);
1368 + if (o_end - o_start < cur_len)
1369 + cur_len = o_end - o_start;
1371 + orig_page_index = o_start >> (PAGE_CACHE_SHIFT -
1372 + orig_inode->i_blkbits);
1373 + donor_page_index = d_start >> (PAGE_CACHE_SHIFT -
1374 + donor_inode->i_blkbits);
1375 + offset_in_page = o_start % blocks_per_page;
1376 +		if (cur_len > blocks_per_page - offset_in_page)
1377 + cur_len = blocks_per_page - offset_in_page;
1379 * Up semaphore to avoid following problems:
1380 * a. transaction deadlock among ext4_journal_start,
1381 @@ -1426,76 +700,36 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
1382 * in move_extent_per_page
1384 ext4_double_up_write_data_sem(orig_inode, donor_inode);
1386 - while (orig_page_offset <= seq_end_page) {
1388 - /* Swap original branches with new branches */
1389 - block_len_in_page = move_extent_per_page(
1390 - o_filp, donor_inode,
1392 - data_offset_in_page,
1393 - block_len_in_page,
1396 - /* Count how many blocks we have exchanged */
1397 - *moved_len += block_len_in_page;
1400 - if (*moved_len > len) {
1401 - EXT4_ERROR_INODE(orig_inode,
1402 - "We replaced blocks too much! "
1403 - "sum of replaced: %llu requested: %llu",
1409 - orig_page_offset++;
1410 - data_offset_in_page = 0;
1411 - rest_blocks -= block_len_in_page;
1412 - if (rest_blocks > blocks_per_page)
1413 - block_len_in_page = blocks_per_page;
1415 - block_len_in_page = rest_blocks;
1418 + /* Swap original branches with new branches */
1419 + move_extent_per_page(o_filp, donor_inode,
1420 + orig_page_index, donor_page_index,
1421 + offset_in_page, cur_len,
1423 ext4_double_down_write_data_sem(orig_inode, donor_inode);
1427 - /* Decrease buffer counter */
1428 - if (holecheck_path)
1429 - ext4_ext_drop_refs(holecheck_path);
1430 - ret = get_ext_path(orig_inode, seq_start, &holecheck_path);
1433 - depth = holecheck_path->p_depth;
1435 - /* Decrease buffer counter */
1437 - ext4_ext_drop_refs(orig_path);
1438 - ret = get_ext_path(orig_inode, seq_start, &orig_path);
1442 - ext_cur = holecheck_path[depth].p_ext;
1443 - add_blocks = ext4_ext_get_actual_len(ext_cur);
1446 + o_start += cur_len;
1447 + d_start += cur_len;
1450 + ext4_ext_drop_refs(path);
1455 + *moved_len = o_start - orig_blk;
1456 + if (*moved_len > len)
1461 ext4_discard_preallocations(orig_inode);
1462 ext4_discard_preallocations(donor_inode);
1466 - ext4_ext_drop_refs(orig_path);
1469 - if (holecheck_path) {
1470 - ext4_ext_drop_refs(holecheck_path);
1471 - kfree(holecheck_path);
1473 + ext4_ext_drop_refs(path);
1476 ext4_double_up_write_data_sem(orig_inode, donor_inode);
1477 ext4_inode_resume_unlocked_dio(orig_inode);