Rebase to reflect accepted patches and snapshot
[ext4-patch-queue.git] / fix-xfstests-punch-hole-failure
blob0c21541319399982b53d5fc48d781cfa054c29b9
1 ext4: fix xfstests 75, 112, 127 punch hole failure
3 From: Allison Henderson <achender@linux.vnet.ibm.com>
5 This patch corrects a punch hole bug found by xfstests
6 when the block size is set to 1k.  Test 127 runs longer
7 before it fails, but that appears to be a separate bug.
9 This bug happens because the punch hole code only zeros
10 out non block aligned blocks, and then releases the pages
11 for data that is page aligned.  This means that if the
12 blocks are smaller than a page, then the blocks contained
13 in the non page aligned regions (but still block aligned)
14 are left unzeroed and mapped.
16 This patch adds a new ext4_unmap_partial_page_buffers routine
17 that unmaps the block aligned buffers in a page that are
18 contained in a specified range.
20 Signed-off-by: Allison Henderson <achender@linux.vnet.ibm.com>
21 Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
22 ---
23 v1 -> v2
24 Added EXT4_BLOCK_ZERO_DISCARD_BUFFER flag
26 v2 -> v3
27 Moved code out of ext4_zero_block_page_range and in
28 to new ext4_unmap_page_range function
30 v3 -> v4
31 Renamed ext4_unmap_page_range to ext4_unmap_partial_page_buffers
32 Moved ext4_unmap_partial_page_buffers from inode.c to extents.c
33 Corrected comments for non block/page aligned handling
34 Added checks to avoid unnecessary page unmaps
35 Removed unneeded journaling and mapping from new routine
37 :100644 100644 4d73e11... a946023... M  fs/ext4/extents.c
38  fs/ext4/extents.c |  142 +++++++++++++++++++++++++++++++++++++++++++++++++++--
39  1 files changed, 138 insertions(+), 4 deletions(-)
41 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
42 index 4d73e11..a946023 100644
43 --- a/fs/ext4/extents.c
44 +++ b/fs/ext4/extents.c
45 @@ -4137,6 +4137,107 @@ static int ext4_xattr_fiemap(struct inode *inode,
46  }
48  /*
49 + * ext4_unmap_partial_page_buffers()
50 + * Unmaps a page range of length 'length' starting from offset
51 + * 'from'.  The range to be unmapped must be contained within
52 + * one page.  If the specified range exceeds the end of the page
53 + * it will be shortened to the end of the page that corresponds to
54 + * 'from'.  Only block aligned buffers will be unmapped; non-block-
55 + * aligned buffers are skipped.
56 + */
57 +static int ext4_unmap_partial_page_buffers(handle_t *handle,
58 +               struct address_space *mapping, loff_t from, loff_t length)
60 +       ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
61 +       unsigned int offset = from & (PAGE_CACHE_SIZE-1);
62 +       unsigned int blocksize, max, pos;
63 +       unsigned int end_of_block, range_to_unmap;
64 +       ext4_lblk_t iblock;
65 +       struct inode *inode = mapping->host;
66 +       struct buffer_head *bh;
67 +       struct page *page;
68 +       int err = 0;
70 +       page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
71 +                                  mapping_gfp_mask(mapping) & ~__GFP_FS);
72 +       if (!page)
73 +               return -EINVAL;
75 +       blocksize = inode->i_sb->s_blocksize;
76 +       max = PAGE_CACHE_SIZE - offset;
78 +       /*
79 +        * correct length if it does not fall between
80 +        * 'from' and the end of the page
81 +        */
82 +       if (length > max || length < 0)
83 +               length = max;
85 +       iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
87 +       if (!page_has_buffers(page))
88 +               goto unlock;
90 +       /* Find the buffer that contains "offset" */
91 +       bh = page_buffers(page);
92 +       pos = blocksize;
93 +       while (offset >= pos) {
94 +               bh = bh->b_this_page;
95 +               iblock++;
96 +               pos += blocksize;
97 +       }
99 +       pos = offset;
100 +       while (pos < offset + length) {
101 +               err = 0;
103 +               /* The length of space left to zero */
104 +               range_to_unmap = offset + length - pos;
106 +               /* The length of space until the end of the block */
107 +               end_of_block = blocksize - (pos & (blocksize-1));
109 +               /* Do not unmap past end of block */
110 +               if (range_to_unmap > end_of_block)
111 +                       range_to_unmap = end_of_block;
113 +               if (buffer_freed(bh)) {
114 +                       BUFFER_TRACE(bh, "freed: skip");
115 +                       goto next;
116 +               }
118 +               if (!buffer_mapped(bh)) {
119 +                       BUFFER_TRACE(bh, "unmapped: skip");
120 +                       goto next;
121 +               }
123 +               /* If the range is not block aligned, skip */
124 +               if (range_to_unmap != blocksize)
125 +                       goto next;
127 +               clear_buffer_dirty(bh);
128 +               bh->b_bdev = NULL;
129 +               clear_buffer_mapped(bh);
130 +               clear_buffer_req(bh);
131 +               clear_buffer_new(bh);
132 +               clear_buffer_delay(bh);
133 +               clear_buffer_unwritten(bh);
134 +               clear_buffer_uptodate(bh);
135 +               ClearPageUptodate(page);
137 +               BUFFER_TRACE(bh, "buffer unmapped");
138 +next:
139 +               bh = bh->b_this_page;
140 +               iblock++;
141 +               pos += range_to_unmap;
142 +       }
143 +unlock:
144 +       unlock_page(page);
145 +       page_cache_release(page);
146 +       return err;
150   * ext4_ext_punch_hole
151   *
152   * Punches a hole of "length" bytes in a file starting
153 @@ -4157,7 +4258,7 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
154         struct address_space *mapping = inode->i_mapping;
155         struct ext4_map_blocks map;
156         handle_t *handle;
157 -       loff_t first_block_offset, last_block_offset, block_len;
158 +       loff_t first_block_offset, last_block_offset, block_len, page_len;
159         loff_t first_page, last_page, first_page_offset, last_page_offset;
160         int ret, credits, blocks_released, err = 0;
162 @@ -4206,9 +4307,9 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
163                 goto out;
165         /*
166 -        * Now we need to zero out the un block aligned data.
167 -        * If the file is smaller than a block, just
168 -        * zero out the middle
169 +        * Now we need to zero out the non-block-aligned data.
170 +        * If the file space being truncated is smaller
171 +        * than a block, just zero out the middle
172          */
173         if (first_block > last_block)
174                 ext4_block_zero_page_range(handle, mapping, offset, length);
175 @@ -4227,6 +4328,39 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
176                 }
177         }
179 +       /*
180 +        * Now we need to unmap the non-page-aligned buffers.
181 +        * If the block size is smaller than the page size
182 +        * and the file space being truncated is not
183 +        * page aligned, then unmap the buffers
184 +        */
185 +       if (inode->i_sb->s_blocksize < PAGE_CACHE_SIZE &&
186 +          !((offset % PAGE_CACHE_SIZE == 0) &&
187 +          (length % PAGE_CACHE_SIZE == 0))) {
189 +               /*
190 +               * If the file space being truncated is smaller
191 +               * than a page, just unmap the middle
192 +               */
193 +               if (first_page > last_page) {
194 +                       ext4_unmap_partial_page_buffers(handle,
195 +                               mapping, offset, length);
196 +               } else {
197 +                       /* unmap page buffers before the first aligned page */
198 +                       page_len = first_page_offset - offset;
199 +                       if (page_len > 0)
200 +                               ext4_unmap_partial_page_buffers(handle,
201 +                                       mapping, offset, page_len);
203 +                       /* unmap the page buffers after the last aligned page */
204 +                       page_len = offset + length - last_page_offset;
205 +                       if (page_len > 0) {
206 +                               ext4_unmap_partial_page_buffers(handle,
207 +                                       mapping, last_page_offset, page_len);
208 +                       }
209 +               }
210 +       }
212         /* If there are no blocks to remove, return now */
213         if (first_block >= last_block)
214                 goto out;
215 -- 
216 1.7.1
219 To unsubscribe from this list: send the line "unsubscribe linux-ext4" in
220 the body of a message to majordomo@vger.kernel.org
221 More majordomo info at  http://vger.kernel.org/majordomo-info.html