ext4: Add support for FALLOC_FL_COLLAPSE_RANGE in fallocate

From: Namjae Jeon <namjae.jeon@samsung.com>
This patch implements fallocate's FALLOC_FL_COLLAPSE_RANGE for ext4.

The semantics of this flag are as follows (see the usage sketch below the
list):
1) It collapses the range lying between offset and offset + len by removing
   any data blocks which are present in this range, and then updates the
   logical offsets of all extents beyond "offset + len" to close the hole
   created by removing those blocks. In short, it does not leave a hole.
2) It must be used exclusively; no other fallocate flag may be combined
   with it.
3) The offset and length supplied to fallocate must be filesystem block
   size aligned in the case of xfs and ext4.
4) Collapse range does not work beyond i_size.
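
For illustration, expected usage from userspace looks like the sketch below.
This is a minimal example and is not part of the patch itself; the file name
"testfile", the 4096-byte block size and the offsets are assumptions chosen
only for the example.

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/falloc.h>

    int main(void)
    {
            /* Illustrative values; both must be block size aligned */
            const off_t blksz  = 4096;
            const off_t offset = 4 * blksz;    /* start of collapsed range */
            const off_t length = 2 * blksz;    /* bytes removed from file  */
            int fd = open("testfile", O_RDWR);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /*
             * Remove [offset, offset + length) and shift the rest of the
             * file down.  No other fallocate flag may be combined with
             * FALLOC_FL_COLLAPSE_RANGE and the range must end below i_size.
             */
            if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, offset, length) < 0)
                    perror("fallocate(FALLOC_FL_COLLAPSE_RANGE)");

            close(fd);
            return 0;
    }

After a successful call the data that previously started at offset + length
is readable at offset, and i_size shrinks by length bytes.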

Signed-off-by: Namjae Jeon <namjae.jeon@samsung.com>
Signed-off-by: Ashish Sangwan <a.sangwan@samsung.com>
Tested-by: Dongsu Park <dongsu.park@profitbricks.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
---
 fs/ext4/ext4.h              |   3 +
 fs/ext4/extents.c           | 308 ++++++++++++++++++++++++++++++++++++++++++++++++-
 fs/ext4/move_extent.c       |   2 +-
 include/trace/events/ext4.h |  25 ++++
 4 files changed, 336 insertions(+), 2 deletions(-)

diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index d3a534f..0596657 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2757,6 +2757,7 @@ extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk);
 extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                        __u64 start, __u64 len);
 extern int ext4_ext_precache(struct inode *inode);
+extern int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);

 /* move_extent.c */
 extern void ext4_double_down_write_data_sem(struct inode *first,
@@ -2766,6 +2767,8 @@ extern void ext4_double_up_write_data_sem(struct inode *orig_inode,
 extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
                             __u64 start_orig, __u64 start_donor,
                             __u64 len, __u64 *moved_len);
+extern int mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
+                           struct ext4_extent **extent);

 /* page-io.c */
 extern int __init ext4_init_pageio(void);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index ef4b535..d7a78ed 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -46,6 +46,10 @@

 #include <trace/events/ext4.h>

+#ifndef FALLOC_FL_COLLAPSE_RANGE
+#define FALLOC_FL_COLLAPSE_RANGE       0x08
+#endif
+
 /*
  * used by extent splitting.
  */
@@ -4582,12 +4586,16 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        unsigned int credits, blkbits = inode->i_blkbits;

        /* Return error if mode is not supported */
-       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
+                    FALLOC_FL_COLLAPSE_RANGE))
                return -EOPNOTSUPP;

        if (mode & FALLOC_FL_PUNCH_HOLE)
                return ext4_punch_hole(inode, offset, len);

+       if (mode & FALLOC_FL_COLLAPSE_RANGE)
+               return ext4_collapse_range(inode, offset, len);
+
        ret = ext4_convert_inline_data(inode);
        if (ret)
                return ret;
@@ -4886,3 +4894,301 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
        ext4_es_lru_add(inode);
        return error;
 }
+
+/*
+ * ext4_access_path:
+ * Function to access the path buffer for marking it dirty.
+ * It also checks if there are sufficient credits left in the journal handle
+ * to update the path.
+ */
+static int
+ext4_access_path(handle_t *handle, struct inode *inode,
+               struct ext4_ext_path *path)
+{
+       int credits, err;
+
+       /*
+        * Check if we need to extend journal credits:
+        * 3 for leaf, sb, and inode plus 2 (bmap and group
+        * descriptor) for each block group; assume two block
+        * groups
+        */
+       if (handle->h_buffer_credits < 7) {
+               credits = ext4_writepage_trans_blocks(inode);
+               err = ext4_ext_truncate_extend_restart(handle, inode, credits);
+               /* EAGAIN is success */
+               if (err && err != -EAGAIN)
+                       return err;
+       }
+
+       err = ext4_ext_get_access(handle, inode, path);
+       return err;
+}
+
+/*
+ * ext4_ext_shift_path_extents:
+ * Shift the extents of a path structure lying between path[depth].p_ext
+ * and EXT_LAST_EXTENT(path[depth].p_hdr) downwards by subtracting shift
+ * from the starting block of each extent.
+ */
+static int
+ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
+                           struct inode *inode, handle_t *handle,
+                           ext4_lblk_t *start)
+{
+       int depth, err = 0;
+       struct ext4_extent *ex_start, *ex_last;
+       bool update = 0;
+       depth = path->p_depth;
+
+       while (depth >= 0) {
+               if (depth == path->p_depth) {
+                       ex_start = path[depth].p_ext;
+                       if (!ex_start)
+                               return -EIO;
+
+                       ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
+                       if (!ex_last)
+                               return -EIO;
+
+                       err = ext4_access_path(handle, inode, path + depth);
+                       if (err)
+                               goto out;
+
+                       if (ex_start == EXT_FIRST_EXTENT(path[depth].p_hdr))
+                               update = 1;
+
+                       *start = ex_last->ee_block +
+                               ext4_ext_get_actual_len(ex_last);
+
+                       while (ex_start <= ex_last) {
+                               ex_start->ee_block -= shift;
+                               if (ex_start >
+                                       EXT_FIRST_EXTENT(path[depth].p_hdr)) {
+                                       if (ext4_ext_try_to_merge_right(inode,
+                                               path, ex_start - 1))
+                                               ex_last--;
+                               }
+                               ex_start++;
+                       }
+                       err = ext4_ext_dirty(handle, inode, path + depth);
+                       if (err)
+                               goto out;
+
+                       if (--depth < 0 || !update)
+                               break;
+               }
+
+               /* Update index too */
+               err = ext4_access_path(handle, inode, path + depth);
+               if (err)
+                       goto out;
+
+               path[depth].p_idx->ei_block -= shift;
+               err = ext4_ext_dirty(handle, inode, path + depth);
+               if (err)
+                       goto out;
+
+               /* We are done if the current index is not a starting index */
+               if (path[depth].p_idx != EXT_FIRST_INDEX(path[depth].p_hdr))
+                       break;
+
+               depth--;
+       }
+
+out:
+       return err;
+}
+
+/*
+ * ext4_ext_shift_extents:
+ * All the extents which lie in the range from start to the last allocated
+ * block for the file are shifted downwards by shift blocks.
+ * On success, 0 is returned; a negative error code otherwise.
+ */
+static int
+ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
+                      ext4_lblk_t start, ext4_lblk_t shift)
+{
+       struct ext4_ext_path *path;
+       int ret = 0, depth;
+       struct ext4_extent *extent;
+       ext4_lblk_t stop_block, current_block;
+       ext4_lblk_t ex_start, ex_end;
+
+       /* Let path point to the last extent */
+       path = ext4_ext_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0);
+       if (IS_ERR(path))
+               return PTR_ERR(path);
+
+       depth = path->p_depth;
+       extent = path[depth].p_ext;
+       if (!extent) {
+               ext4_ext_drop_refs(path);
+               kfree(path);
+               return ret;
+       }
+
+       stop_block = extent->ee_block + ext4_ext_get_actual_len(extent);
+       ext4_ext_drop_refs(path);
+       kfree(path);
+
+       /* Nothing to shift if the hole is at the end of the file */
+       if (start >= stop_block)
+               return ret;
+
+       /*
+        * Don't start shifting extents until we make sure the hole is big
+        * enough to accommodate the shift.
+        */
+       path = ext4_ext_find_extent(inode, start - 1, NULL, 0);
+       depth = path->p_depth;
+       extent = path[depth].p_ext;
+       ex_start = extent->ee_block;
+       ex_end = extent->ee_block + ext4_ext_get_actual_len(extent);
+       ext4_ext_drop_refs(path);
+       kfree(path);
+
+       if ((start == ex_start && shift > ex_start) ||
+           (shift > start - ex_end))
+               return -EINVAL;
+
+       /* It's safe to start updating extents */
+       while (start < stop_block) {
+               path = ext4_ext_find_extent(inode, start, NULL, 0);
+               if (IS_ERR(path))
+                       return PTR_ERR(path);
+               depth = path->p_depth;
+               extent = path[depth].p_ext;
+               current_block = extent->ee_block;
+               if (start > current_block) {
+                       /* Hole, move to the next extent */
+                       ret = mext_next_extent(inode, path, &extent);
+                       if (ret != 0) {
+                               ext4_ext_drop_refs(path);
+                               kfree(path);
+                               if (ret == 1)
+                                       ret = 0;
+                               break;
+                       }
+               }
+               ret = ext4_ext_shift_path_extents(path, shift, inode,
+                               handle, &start);
+               ext4_ext_drop_refs(path);
+               kfree(path);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+/*
+ * ext4_collapse_range:
+ * This implements fallocate's collapse range functionality for ext4.
+ * Returns: 0 on success and a negative error code on failure.
+ */
+int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
+{
+       struct super_block *sb = inode->i_sb;
+       ext4_lblk_t punch_start, punch_stop;
+       handle_t *handle;
+       unsigned int credits;
+       loff_t new_size;
+       int ret;
+
+       BUG_ON(offset + len > i_size_read(inode));
+
+       /* Collapse range works only on fs block size aligned offsets. */
+       if (offset & (EXT4_BLOCK_SIZE(sb) - 1) ||
+           len & (EXT4_BLOCK_SIZE(sb) - 1))
+               return -EINVAL;
+
+       if (!S_ISREG(inode->i_mode))
+               return -EOPNOTSUPP;
+
+       trace_ext4_collapse_range(inode, offset, len);
+
+       punch_start = offset >> EXT4_BLOCK_SIZE_BITS(sb);
+       punch_stop = (offset + len) >> EXT4_BLOCK_SIZE_BITS(sb);
+
+       /* Write out all dirty pages */
+       ret = filemap_write_and_wait_range(inode->i_mapping, offset, -1);
+       if (ret)
+               return ret;
+
+       /* Take mutex lock */
+       mutex_lock(&inode->i_mutex);
+
+       /* Can't collapse a range on an append-only or immutable file */
+       if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
+               ret = -EPERM;
+               goto out_mutex;
+       }
+
+       if (IS_SWAPFILE(inode)) {
+               ret = -ETXTBSY;
+               goto out_mutex;
+       }
+
+       /* Currently just for extent-based files */
+       if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
+               ret = -EOPNOTSUPP;
+               goto out_mutex;
+       }
+
+       truncate_pagecache_range(inode, offset, -1);
+
+       /* Wait for existing dio to complete */
+       ext4_inode_block_unlocked_dio(inode);
+       inode_dio_wait(inode);
+
+       credits = ext4_writepage_trans_blocks(inode);
+       handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               goto out_dio;
+       }
+
+       down_write(&EXT4_I(inode)->i_data_sem);
+       ext4_discard_preallocations(inode);
+
+       ret = ext4_es_remove_extent(inode, punch_start,
+                                   EXT_MAX_BLOCKS - punch_start - 1);
+       if (ret) {
+               up_write(&EXT4_I(inode)->i_data_sem);
+               goto out_stop;
+       }
+
+       ret = ext4_ext_remove_space(inode, punch_start, punch_stop - 1);
+       if (ret) {
+               up_write(&EXT4_I(inode)->i_data_sem);
+               goto out_stop;
+       }
+
+       ret = ext4_ext_shift_extents(inode, handle, punch_stop,
+                                    punch_stop - punch_start);
+       if (ret) {
+               up_write(&EXT4_I(inode)->i_data_sem);
+               goto out_stop;
+       }
+
+       new_size = i_size_read(inode) - len;
+       i_size_write(inode, new_size);
+       EXT4_I(inode)->i_disksize = new_size;
+
+       ext4_discard_preallocations(inode);
+       up_write(&EXT4_I(inode)->i_data_sem);
+       if (IS_SYNC(inode))
+               ext4_handle_sync(handle);
+       inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+       ext4_mark_inode_dirty(handle, inode);
+
+out_stop:
+       ext4_journal_stop(handle);
+out_dio:
+       ext4_inode_resume_unlocked_dio(inode);
+out_mutex:
+       mutex_unlock(&inode->i_mutex);
+       return ret;
+}
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index f39a88a..58ee7dc 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -76,7 +76,7 @@ copy_extent_status(struct ext4_extent *src, struct ext4_extent *dest)
  * ext4_ext_path structure refers to the last extent, or a negative error
  * value on failure.
  */
-static int
+int
 mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
                      struct ext4_extent **extent)
 {
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 197d312..90e2f71 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -2410,6 +2410,31 @@ TRACE_EVENT(ext4_es_shrink_exit,
                  __entry->shrunk_nr, __entry->cache_cnt)
 );

+TRACE_EVENT(ext4_collapse_range,
+       TP_PROTO(struct inode *inode, loff_t offset, loff_t len),
+
+       TP_ARGS(inode, offset, len),
+
+       TP_STRUCT__entry(
+               __field(dev_t,  dev)
+               __field(ino_t,  ino)
+               __field(loff_t, offset)
+               __field(loff_t, len)
+       ),
+
+       TP_fast_assign(
+               __entry->dev    = inode->i_sb->s_dev;
+               __entry->ino    = inode->i_ino;
+               __entry->offset = offset;
+               __entry->len    = len;
+       ),
+
+       TP_printk("dev %d,%d ino %lu offset %lld len %lld",
+                 MAJOR(__entry->dev), MINOR(__entry->dev),
+                 (unsigned long) __entry->ino,
+                 __entry->offset, __entry->len)
+);
+
 #endif /* _TRACE_EXT4_H */

 /* This part must be outside protection */