ext4: Introduce FALLOC_FL_ZERO_RANGE flag for fallocate

From: Lukas Czerner <lczerner@redhat.com>
Introduce a new FALLOC_FL_ZERO_RANGE flag for fallocate. This has the same
functionality as the xfs ioctl XFS_IOC_ZERO_RANGE.

It can be used to convert a range of a file to zeros, preferably without
issuing data IO. Blocks should be preallocated for the regions that span
holes in the file, and the entire range is preferably converted to
unwritten extents.

This can also be used to preallocate blocks past EOF in the same way as
with fallocate, using the FALLOC_FL_KEEP_SIZE flag, which causes the inode
size to remain the same.
Also add appropriate tracepoints.

Signed-off-by: Lukas Czerner <lczerner@redhat.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
---
v3: Set EXT4_INODE_EOFBLOCKS when needed in zero_range
    Fix the problems when rebased on ext4/dev branch
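
For illustration, a minimal userspace sketch of the new mode (not part of
the patch; the file name, range, and the fallback define are arbitrary, and
the kernel must have this patch applied):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/falloc.h>

	#ifndef FALLOC_FL_ZERO_RANGE
	#define FALLOC_FL_ZERO_RANGE	0x10	/* value used by this series */
	#endif

	int main(void)
	{
		int fd = open("testfile", O_RDWR);

		if (fd < 0)
			return 1;
		/* Zero out 1 MiB at offset 4096 without changing i_size */
		if (fallocate(fd, FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE,
			      4096, 1024 * 1024))
			perror("fallocate");
		close(fd);
		return 0;
	}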
 fs/ext4/ext4.h              |   2 +
 fs/ext4/extents.c           | 273 +++++++++++++++++++++++++++++++++++++++++---
 fs/ext4/inode.c             |  17 ++-
 include/trace/events/ext4.h |  68 +++++------
 4 files changed, 307 insertions(+), 53 deletions(-)
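
As a worked example of the alignment logic in ext4_zero_range() below
(illustrative numbers only; assumes a 4096-byte block size):

	#include <stdio.h>

	int main(void)
	{
		unsigned int blkbits = 12;		/* 4096-byte blocks */
		long long bs = 1LL << blkbits;
		long long offset = 5000, len = 10000;	/* hypothetical range */
		long long start = (offset + bs - 1) & ~(bs - 1);  /* 8192 */
		long long end = (offset + len) & ~(bs - 1);       /* 12288 */
		long long lblk = start >> blkbits;                /* 2 */
		long long max_blocks = (end >> blkbits) - lblk;   /* 1 */
		long long partial = (offset + len) & (bs - 1);    /* 2712 */

		/* Block [8192, 12288) is converted to unwritten; the
		 * unaligned head [5000, 8192) and tail [12288, 15000)
		 * are zeroed via ext4_zero_partial_blocks(). */
		printf("start=%lld end=%lld lblk=%lld max_blocks=%lld partial=%lld\n",
		       start, end, lblk, max_blocks, partial);
		return 0;
	}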
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index beec427..1b3cbf8 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -568,6 +568,8 @@ enum {
 #define EXT4_GET_BLOCKS_NO_LOCK                        0x0100
        /* Do not put hole in extent cache */
 #define EXT4_GET_BLOCKS_NO_PUT_HOLE            0x0200
+       /* Convert written extents to unwritten */
+#define EXT4_GET_BLOCKS_CONVERT_UNWRITTEN      0x0400
 
 /*
  * The bit position of these flags must not overlap with any of the
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 1c09a09..243a02e 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3602,6 +3602,8 @@ out:
  *   b> Splits in two extents: Write is happening at either end of the extent
  *   c> Splits in three extents: Somone is writing in middle of the extent
  *
+ * This works the same way in the case of initialized -> unwritten conversion.
+ *
  * One of more index blocks maybe needed if the extent tree grow after
  * the uninitialized extent split. To prevent ENOSPC occur at the IO
  * complete, we need to split the uninitialized extent before DIO submit
@@ -3612,7 +3614,7 @@ out:
  *
  * Returns the size of uninitialized extent to be written on success.
  */
-static int ext4_split_unwritten_extents(handle_t *handle,
+static int ext4_split_convert_extents(handle_t *handle,
                                        struct inode *inode,
                                        struct ext4_map_blocks *map,
                                        struct ext4_ext_path *path,
@@ -3624,9 +3626,9 @@ static int ext4_split_unwritten_extents(handle_t *handle,
        unsigned int ee_len;
        int split_flag = 0, depth;
 
-       ext_debug("ext4_split_unwritten_extents: inode %lu, logical"
-               "block %llu, max_blocks %u\n", inode->i_ino,
-               (unsigned long long)map->m_lblk, map->m_len);
+       ext_debug("%s: inode %lu, logical block %llu, max_blocks %u\n",
+                 __func__, inode->i_ino,
+                 (unsigned long long)map->m_lblk, map->m_len);
 
        eof_block = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
                inode->i_sb->s_blocksize_bits;
@@ -3641,14 +3643,73 @@ static int ext4_split_unwritten_extents(handle_t *handle,
        ee_block = le32_to_cpu(ex->ee_block);
        ee_len = ext4_ext_get_actual_len(ex);
 
-       split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0;
-       split_flag |= EXT4_EXT_MARK_UNINIT2;
-       if (flags & EXT4_GET_BLOCKS_CONVERT)
-               split_flag |= EXT4_EXT_DATA_VALID2;
+       /* Convert to unwritten */
+       if (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN) {
+               split_flag |= EXT4_EXT_DATA_VALID1;
+       /* Convert to initialized */
+       } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
+               split_flag |= ee_block + ee_len <= eof_block ?
+                             EXT4_EXT_MAY_ZEROOUT : 0;
+               split_flag |= (EXT4_EXT_MARK_UNINIT2 | EXT4_EXT_DATA_VALID2);
+       }
        flags |= EXT4_GET_BLOCKS_PRE_IO;
        return ext4_split_extent(handle, inode, path, map, split_flag, flags);
 }
+static int ext4_convert_initialized_extents(handle_t *handle,
+                                           struct inode *inode,
+                                           struct ext4_map_blocks *map,
+                                           struct ext4_ext_path *path)
+{
+       struct ext4_extent *ex;
+       ext4_lblk_t ee_block;
+       unsigned int ee_len;
+       int depth;
+       int err = 0;
+
+       depth = ext_depth(inode);
+       ex = path[depth].p_ext;
+       ee_block = le32_to_cpu(ex->ee_block);
+       ee_len = ext4_ext_get_actual_len(ex);
+
+       ext_debug("%s: inode %lu, logical "
+                 "block %llu, max_blocks %u\n", __func__, inode->i_ino,
+                 (unsigned long long)ee_block, ee_len);
+
+       if (ee_block != map->m_lblk || ee_len > map->m_len) {
+               err = ext4_split_convert_extents(handle, inode, map, path,
+                               EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
+               if (err < 0)
+                       goto out;
+               ext4_ext_drop_refs(path);
+               path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
+               if (IS_ERR(path)) {
+                       err = PTR_ERR(path);
+                       goto out;
+               }
+               depth = ext_depth(inode);
+               ex = path[depth].p_ext;
+       }
+
+       err = ext4_ext_get_access(handle, inode, path + depth);
+       if (err)
+               goto out;
+       /* first mark the extent as uninitialized */
+       ext4_ext_mark_uninitialized(ex);
+
+       /* note: ext4_ext_correct_indexes() isn't needed here because
+        * borders are not changed
+        */
+       ext4_ext_try_to_merge(handle, inode, path, ex);
+
+       /* Mark modified extent as dirty */
+       err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+out:
+       ext4_ext_show_leaf(inode, path);
+       return err;
+}
+
 static int ext4_convert_unwritten_extents_endio(handle_t *handle,
                                                struct inode *inode,
                                                struct ext4_map_blocks *map,
@@ -3682,8 +3743,8 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
                             inode->i_ino, (unsigned long long)ee_block, ee_len,
                             (unsigned long long)map->m_lblk, map->m_len);
 #endif
-               err = ext4_split_unwritten_extents(handle, inode, map, path,
-                                                  EXT4_GET_BLOCKS_CONVERT);
+               err = ext4_split_convert_extents(handle, inode, map, path,
+                                                EXT4_GET_BLOCKS_CONVERT);
                if (err < 0)
                        goto out;
                ext4_ext_drop_refs(path);
@@ -3884,6 +3945,38 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
 }
 
 static int
+ext4_ext_convert_initialized_extent(handle_t *handle, struct inode *inode,
+                       struct ext4_map_blocks *map,
+                       struct ext4_ext_path *path, int flags,
+                       unsigned int allocated, ext4_fsblk_t newblock)
+{
+       int ret = 0;
+       int err = 0;
+
+       /*
+        * Make sure that the extent is no bigger than we support with
+        * uninitialized extent
+        */
+       if (map->m_len > EXT_UNINIT_MAX_LEN)
+               map->m_len = EXT_UNINIT_MAX_LEN / 2;
+
+       ret = ext4_convert_initialized_extents(handle, inode, map,
+                                               path);
+       if (ret >= 0) {
+               ext4_update_inode_fsync_trans(handle, inode, 1);
+               err = check_eofblocks_fl(handle, inode, map->m_lblk,
+                                        path, map->m_len);
+       } else
+               err = ret;
+       map->m_flags |= EXT4_MAP_UNWRITTEN;
+       if (allocated > map->m_len)
+               allocated = map->m_len;
+       map->m_len = allocated;
+
+       return err ? err : allocated;
+}
+
+static int
 ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
                        struct ext4_map_blocks *map,
                        struct ext4_ext_path *path, int flags,
@@ -3910,8 +4003,8 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
 
        /* get_block() before submit the IO, split the extent */
        if ((flags & EXT4_GET_BLOCKS_PRE_IO)) {
-               ret = ext4_split_unwritten_extents(handle, inode, map,
-                                                  path, flags);
+               ret = ext4_split_convert_extents(handle, inode, map,
+                                        path, flags | EXT4_GET_BLOCKS_CONVERT);
                if (ret <= 0)
                        goto out;
                /*
@@ -4199,6 +4292,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                ext4_fsblk_t ee_start = ext4_ext_pblock(ex);
                unsigned short ee_len;
 
+
                /*
                 * Uninitialized extents are treated as holes, except that
                 * we split out initialized portions during a write.
@@ -4215,7 +4309,17 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
                        ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk,
                                  ee_block, ee_len, newblock);
 
-                       if (!ext4_ext_is_uninitialized(ex))
+                       /*
+                        * If the extent is initialized check whether the
+                        * caller wants to convert it to unwritten.
+                        */
+                       if ((!ext4_ext_is_uninitialized(ex)) &&
+                           (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
+                               allocated = ext4_ext_convert_initialized_extent(
+                                               handle, inode, map, path, flags,
+                                               allocated, newblock);
+                               goto out2;
+                       } else if (!ext4_ext_is_uninitialized(ex))
                                goto out;
 
                        ret = ext4_ext_handle_uninitialized_extents(
@@ -4604,6 +4708,144 @@ retry:
        return ret > 0 ? ret2 : ret;
 }
 
+static long ext4_zero_range(struct file *file, loff_t offset,
+                           loff_t len, int mode)
+{
+       struct inode *inode = file_inode(file);
+       handle_t *handle = NULL;
+       unsigned int max_blocks;
+       loff_t new_size = 0;
+       int ret = 0;
+       int flags;
+       int partial;
+       loff_t start, end;
+       ext4_lblk_t lblk;
+       struct address_space *mapping = inode->i_mapping;
+       unsigned int blkbits = inode->i_blkbits;
+
+       trace_ext4_zero_range(inode, offset, len, mode);
+
+       /*
+        * Write out all dirty pages to avoid race conditions.
+        * Then release them.
+        */
+       if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+               ret = filemap_write_and_wait_range(mapping, offset,
+                                                  offset + len - 1);
+               if (ret)
+                       return ret;
+       }
+
+       /*
+        * Round up offset. This is not fallocate, we need to zero out
+        * blocks, so convert interior block aligned part of the range to
+        * unwritten and possibly manually zero out unaligned parts of the
+        * range.
+        */
+       start = round_up(offset, 1 << blkbits);
+       end = round_down((offset + len), 1 << blkbits);
+
+       if (start < offset || end > offset + len)
+               return -EINVAL;
+       partial = (offset + len) & ((1 << blkbits) - 1);
+
+       lblk = start >> blkbits;
+       max_blocks = (end >> blkbits);
+       if (max_blocks < lblk)
+               max_blocks = 0;
+       else
+               max_blocks -= lblk;
+
+       flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT |
+               EXT4_GET_BLOCKS_CONVERT_UNWRITTEN;
+       if (mode & FALLOC_FL_KEEP_SIZE)
+               flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
+
+       mutex_lock(&inode->i_mutex);
+
+       /*
+        * Indirect files do not support unwritten extents
+        */
+       if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+               ret = -EOPNOTSUPP;
+               goto out_mutex;
+       }
+
+       if (!(mode & FALLOC_FL_KEEP_SIZE) &&
+            offset + len > i_size_read(inode)) {
+               new_size = offset + len;
+               ret = inode_newsize_ok(inode, new_size);
+               if (ret)
+                       goto out_mutex;
+               /*
+                * If we have a partial block after EOF we have to allocate
+                * the entire block.
+                */
+               if (partial)
+                       max_blocks += 1;
+       }
+
+       if (max_blocks > 0) {
+
+               /* Now release the pages and zero block aligned part of pages*/
+               truncate_pagecache_range(inode, start, end - 1);
+
+               /* Wait all existing dio workers, newcomers will block on i_mutex */
+               ext4_inode_block_unlocked_dio(inode);
+               inode_dio_wait(inode);
+
+               /*
+                * Remove entire range from the extent status tree.
+                */
+               ret = ext4_es_remove_extent(inode, lblk, max_blocks);
+               if (ret)
+                       goto out_dio;
+
+               ret = ext4_alloc_file_blocks(file, lblk, max_blocks, flags,
+                                            mode);
+               if (ret)
+                       goto out_dio;
+       }
+
+       handle = ext4_journal_start(inode, EXT4_HT_MISC, 4);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               ext4_std_error(inode->i_sb, ret);
+               goto out_dio;
+       }
+
+       inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
+
+       if (!ret && new_size) {
+               if (new_size > i_size_read(inode))
+                       i_size_write(inode, new_size);
+               if (new_size > EXT4_I(inode)->i_disksize)
+                       ext4_update_i_disksize(inode, new_size);
+       } else if (!ret && !new_size) {
+               /*
+                * Mark that we allocate beyond EOF so the subsequent truncate
+                * can proceed even if the new size is the same as i_size.
+                */
+               if ((offset + len) > i_size_read(inode))
+                       ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
+       }
+
+       ext4_mark_inode_dirty(handle, inode);
+
+       /* Zero out partial block at the edges of the range */
+       ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+
+       if (file->f_flags & O_SYNC)
+               ext4_handle_sync(handle);
+
+       ext4_journal_stop(handle);
+out_dio:
+       ext4_inode_resume_unlocked_dio(inode);
+out_mutex:
+       mutex_unlock(&inode->i_mutex);
+       return ret;
+}
+
 /*
  * preallocate space for a file. This implements ext4's fallocate file
  * operation, which gets called from sys_fallocate system call.
@@ -4625,7 +4867,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
 
        /* Return error if mode is not supported */
        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE |
-                    FALLOC_FL_COLLAPSE_RANGE))
+                    FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
                return -EOPNOTSUPP;
 
        if (mode & FALLOC_FL_PUNCH_HOLE)
@@ -4645,6 +4887,9 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
        if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
                return -EOPNOTSUPP;
 
+       if (mode & FALLOC_FL_ZERO_RANGE)
+               return ext4_zero_range(file, offset, len, mode);
+
        trace_ext4_fallocate_enter(inode, offset, len, mode);
        lblk = offset >> blkbits;
        /*
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ab3e835..7cc2455 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -503,6 +503,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
 {
        struct extent_status es;
        int retval;
+       int ret = 0;
 #ifdef ES_AGGRESSIVE_TEST
        struct ext4_map_blocks orig_map;
 
@@ -558,7 +559,6 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
                                             EXT4_GET_BLOCKS_KEEP_SIZE);
        }
        if (retval > 0) {
-               int ret;
                unsigned int status;
 
                if (unlikely(retval != map->m_len)) {
@@ -585,7 +585,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
 
 found:
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
-               int ret = check_block_validity(inode, map);
+               ret = check_block_validity(inode, map);
                if (ret != 0)
                        return ret;
        }
@@ -602,7 +602,13 @@ found:
         * with buffer head unmapped.
         */
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
-               return retval;
+               /*
+                * If we need to convert extent to unwritten
+                * we continue and do the actual work in
+                * ext4_ext_map_blocks()
+                */
+               if (!(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN))
+                       return retval;
 
        /*
         * Here we clear m_flags because after allocating an new extent,
@@ -658,7 +664,6 @@ found:
                ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
 
        if (retval > 0) {
-               int ret;
                unsigned int status;
 
                if (unlikely(retval != map->m_len)) {
@@ -693,7 +698,7 @@ found:
 has_zeroout:
        up_write((&EXT4_I(inode)->i_data_sem));
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
-               int ret = check_block_validity(inode, map);
+               ret = check_block_validity(inode, map);
                if (ret != 0)
                        return ret;
        }
@@ -3507,7 +3512,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
        if (!S_ISREG(inode->i_mode))
                return -EOPNOTSUPP;
 
-       trace_ext4_punch_hole(inode, offset, length);
+       trace_ext4_punch_hole(inode, offset, length, 0);
 
        /*
         * Write out all dirty pages to avoid race conditions
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index e9d7ee7..010ea89 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -21,6 +21,10 @@ struct extent_status;
 #define FALLOC_FL_COLLAPSE_RANGE       0x08
 #endif
 
+#ifndef FALLOC_FL_ZERO_RANGE
+#define FALLOC_FL_ZERO_RANGE           0x10
+#endif
+
 #define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
 
 #define show_mballoc_flags(flags) __print_flags(flags, "|",    \
@@ -77,7 +81,8 @@ struct extent_status;
        { FALLOC_FL_KEEP_SIZE,          "KEEP_SIZE"},           \
        { FALLOC_FL_PUNCH_HOLE,         "PUNCH_HOLE"},          \
        { FALLOC_FL_NO_HIDE_STALE,      "NO_HIDE_STALE"},       \
-       { FALLOC_FL_COLLAPSE_RANGE,     "COLLAPSE_RANGE"})
+       { FALLOC_FL_COLLAPSE_RANGE,     "COLLAPSE_RANGE"},      \
+       { FALLOC_FL_ZERO_RANGE,         "ZERO_RANGE"})
 
 
 TRACE_EVENT(ext4_free_inode,
@@ -1339,7 +1344,7 @@ TRACE_EVENT(ext4_direct_IO_exit,
                  __entry->rw, __entry->ret)
 );
 
-TRACE_EVENT(ext4_fallocate_enter,
+DECLARE_EVENT_CLASS(ext4__fallocate_mode,
        TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
 
        TP_ARGS(inode, offset, len, mode),
@@ -1347,23 +1352,45 @@ TRACE_EVENT(ext4_fallocate_enter,
        TP_STRUCT__entry(
                __field(        dev_t,  dev                     )
                __field(        ino_t,  ino                     )
-               __field(        loff_t, pos                     )
-               __field(        loff_t, len                     )
+               __field(        loff_t, offset                  )
+               __field(        loff_t, len                     )
                __field(        int,    mode                    )
        ),
 
        TP_fast_assign(
                __entry->dev    = inode->i_sb->s_dev;
                __entry->ino    = inode->i_ino;
-               __entry->pos    = offset;
+               __entry->offset = offset;
                __entry->len    = len;
                __entry->mode   = mode;
        ),
 
-       TP_printk("dev %d,%d ino %lu pos %lld len %lld mode %s",
+       TP_printk("dev %d,%d ino %lu offset %lld len %lld mode %s",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino, __entry->pos,
-                 __entry->len, show_falloc_mode(__entry->mode))
+                 (unsigned long) __entry->ino,
+                 __entry->offset, __entry->len,
+                 show_falloc_mode(__entry->mode))
+);
+
+DEFINE_EVENT(ext4__fallocate_mode, ext4_fallocate_enter,
+
+       TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
+
+       TP_ARGS(inode, offset, len, mode)
+);
+
+DEFINE_EVENT(ext4__fallocate_mode, ext4_punch_hole,
+
+       TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
+
+       TP_ARGS(inode, offset, len, mode)
+);
+
+DEFINE_EVENT(ext4__fallocate_mode, ext4_zero_range,
+
+       TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
+
+       TP_ARGS(inode, offset, len, mode)
 );
 
 TRACE_EVENT(ext4_fallocate_exit,
@@ -1395,31 +1422,6 @@ TRACE_EVENT(ext4_fallocate_exit,
                  __entry->ret)
 );
 
-TRACE_EVENT(ext4_punch_hole,
-       TP_PROTO(struct inode *inode, loff_t offset, loff_t len),
-
-       TP_ARGS(inode, offset, len),
-
-       TP_STRUCT__entry(
-               __field(        dev_t,  dev                     )
-               __field(        ino_t,  ino                     )
-               __field(        loff_t, offset                  )
-               __field(        loff_t, len                     )
-       ),
-
-       TP_fast_assign(
-               __entry->dev    = inode->i_sb->s_dev;
-               __entry->ino    = inode->i_ino;
-               __entry->offset = offset;
-               __entry->len    = len;
-       ),
-
-       TP_printk("dev %d,%d ino %lu offset %lld len %lld",
-                 MAJOR(__entry->dev), MINOR(__entry->dev),
-                 (unsigned long) __entry->ino,
-                 __entry->offset, __entry->len)
-);
-
 TRACE_EVENT(ext4_unlink_enter,
        TP_PROTO(struct inode *parent, struct dentry *dentry),
-- 
1.8.3.1