1 ext4: use ext4_zero_partial_blocks in punch_hole
3 From: Lukas Czerner <lczerner@redhat.com>
5 We're going to get rid of ext4_discard_partial_page_buffers() since it is
6 duplicating some code and also partially duplicating work of
7 truncate_pagecache_range(), moreover the old implementation was much
10 Now when the truncate_inode_pages_range() can handle truncating non page
11 aligned regions we can use this to invalidate and zero out block aligned
12 region of the punched out range and then use ext4_block_truncate_page()
13 to zero the unaligned blocks on the start and end of the range. This
14 will greatly simplify the punch hole code. Moreover after this commit we
15 can get rid of the ext4_discard_partial_page_buffers() completely.
17 We also introduce function ext4_prepare_punch_hole() to do some common
18 operations before we attempt to do the actual punch hole on
19 indirect or extent file which saves us some code duplication.
21 This has been tested on ppc64 with 1k block size with fsx and xfstests
24 Signed-off-by: Lukas Czerner <lczerner@redhat.com>
25 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
27 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
28 index 9f9719f..2d4b0aa 100644
31 @@ -2100,6 +2100,8 @@ extern int ext4_block_truncate_page(handle_t *handle,
32 struct address_space *mapping, loff_t from);
33 extern int ext4_block_zero_page_range(handle_t *handle,
34 struct address_space *mapping, loff_t from, loff_t length);
35 +extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
36 + loff_t lstart, loff_t lend);
37 extern int ext4_discard_partial_page_buffers(handle_t *handle,
38 struct address_space *mapping, loff_t from,
39 loff_t length, int flags);
40 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
41 index a7f6b31..3acf353 100644
44 @@ -3693,6 +3693,41 @@ unlock:
48 +int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
49 + loff_t lstart, loff_t length)
51 + struct super_block *sb = inode->i_sb;
52 + struct address_space *mapping = inode->i_mapping;
53 + unsigned partial = lstart & (sb->s_blocksize - 1);
54 + ext4_fsblk_t start, end;
55 + loff_t byte_end = (lstart + length - 1);
58 + start = lstart >> sb->s_blocksize_bits;
59 + end = byte_end >> sb->s_blocksize_bits;
61 + /* Handle partial zero within the single block */
63 + err = ext4_block_zero_page_range(handle, mapping,
67 + /* Handle partial zero out on the start of the range */
69 + err = ext4_block_zero_page_range(handle, mapping,
70 + lstart, sb->s_blocksize);
74 + /* Handle partial zero out on the end of the range */
75 + partial = byte_end & (sb->s_blocksize - 1);
76 + if (partial != sb->s_blocksize - 1)
77 + err = ext4_block_zero_page_range(handle, mapping,
83 int ext4_can_truncate(struct inode *inode)
85 if (S_ISREG(inode->i_mode))
86 @@ -3721,8 +3756,7 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
87 struct super_block *sb = inode->i_sb;
88 ext4_lblk_t first_block, stop_block;
89 struct address_space *mapping = inode->i_mapping;
90 - loff_t first_page, last_page, page_len;
91 - loff_t first_page_offset, last_page_offset;
92 + loff_t first_block_offset, last_block_offset;
96 @@ -3773,17 +3807,13 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
100 - first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
101 - last_page = (offset + length) >> PAGE_CACHE_SHIFT;
102 + first_block_offset = round_up(offset, sb->s_blocksize);
103 + last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
105 - first_page_offset = first_page << PAGE_CACHE_SHIFT;
106 - last_page_offset = last_page << PAGE_CACHE_SHIFT;
108 - /* Now release the pages */
109 - if (last_page_offset > first_page_offset) {
110 - truncate_pagecache_range(inode, first_page_offset,
111 - last_page_offset - 1);
113 + /* Now release the pages and zero block aligned part of pages */
114 + if (last_block_offset > first_block_offset)
115 + truncate_pagecache_range(inode, first_block_offset,
116 + last_block_offset);
118 /* Wait all existing dio workers, newcomers will block on i_mutex */
119 ext4_inode_block_unlocked_dio(inode);
120 @@ -3803,66 +3833,10 @@ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
125 - * Now we need to zero out the non-page-aligned data in the
126 - * pages at the start and tail of the hole, and unmap the
127 - * buffer heads for the block aligned regions of the page that
128 - * were completely zeroed.
130 - if (first_page > last_page) {
132 - * If the file space being truncated is contained
133 - * within a page just zero out and unmap the middle of
136 - ret = ext4_discard_partial_page_buffers(handle,
137 - mapping, offset, length, 0);
143 - * zero out and unmap the partial page that contains
144 - * the start of the hole
146 - page_len = first_page_offset - offset;
147 - if (page_len > 0) {
148 - ret = ext4_discard_partial_page_buffers(handle, mapping,
149 - offset, page_len, 0);
155 - * zero out and unmap the partial page that contains
156 - * the end of the hole
158 - page_len = offset + length - last_page_offset;
159 - if (page_len > 0) {
160 - ret = ext4_discard_partial_page_buffers(handle, mapping,
161 - last_page_offset, page_len, 0);
168 - * If i_size is contained in the last page, we need to
169 - * unmap and zero the partial page after i_size
171 - if (inode->i_size >> PAGE_CACHE_SHIFT == last_page &&
172 - inode->i_size % PAGE_CACHE_SIZE != 0) {
173 - page_len = PAGE_CACHE_SIZE -
174 - (inode->i_size & (PAGE_CACHE_SIZE - 1));
176 - if (page_len > 0) {
177 - ret = ext4_discard_partial_page_buffers(handle,
178 - mapping, inode->i_size, page_len, 0);
184 + ret = ext4_zero_partial_blocks(handle, inode, offset,
189 first_block = (offset + sb->s_blocksize - 1) >>
190 EXT4_BLOCK_SIZE_BITS(sb);