1 ext4: rename uninitialized extents to unwritten
3 From: Lukas Czerner <lczerner@redhat.com>
5 Currently in ext4 there is quite a mess when it comes to naming
6 unwritten extents. Sometimes we call it uninitialized and sometimes we
7 refer to it as unwritten.
9 The right name for the extent which has been allocated but does not
10 contain any written data is _unwritten_. Other file systems use
11 this name consistently; even the buffer head state refers to it as
12 unwritten. We need to fix this confusion in ext4.
14 This commit changes every reference to an uninitialized extent (meaning
15 allocated but unwritten) to an unwritten extent. This includes comments,
16 function names and variable names. It even covers abbreviations of the
17 word uninitialized (such as uninit) and some misspellings.
19 This commit does not change any of the code paths at all. This has been
20 confirmed by comparing md5sums of the assembly code of each object file
21 after all the function names were stripped from it.
23 Signed-off-by: Lukas Czerner <lczerner@redhat.com>
24 Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
26 fs/ext4/ext4.h | 16 ++--
27 fs/ext4/ext4_extents.h | 22 ++---
28 fs/ext4/extents.c | 218 ++++++++++++++++++++++-----------------------
29 fs/ext4/extents_status.c | 2 +-
31 fs/ext4/inode.c | 18 ++--
32 fs/ext4/move_extent.c | 38 ++++----
33 fs/ext4/super.c | 2 +-
34 include/trace/events/ext4.h | 8 +-
35 9 files changed, 163 insertions(+), 163 deletions(-)
37 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
38 index b681d90..86c2cda 100644
41 @@ -183,7 +183,7 @@ struct ext4_map_blocks {
42 #define EXT4_IO_END_UNWRITTEN 0x0001
45 - * For converting uninitialized extents on a work queue. 'handle' is used for
46 + * For converting unwritten extents on a work queue. 'handle' is used for
49 typedef struct ext4_io_end {
50 @@ -536,26 +536,26 @@ enum {
52 * Flags used by ext4_map_blocks()
54 - /* Allocate any needed blocks and/or convert an unitialized
55 + /* Allocate any needed blocks and/or convert an unwritten
56 extent to be an initialized ext4 */
57 #define EXT4_GET_BLOCKS_CREATE 0x0001
58 - /* Request the creation of an unitialized extent */
59 -#define EXT4_GET_BLOCKS_UNINIT_EXT 0x0002
60 -#define EXT4_GET_BLOCKS_CREATE_UNINIT_EXT (EXT4_GET_BLOCKS_UNINIT_EXT|\
61 + /* Request the creation of an unwritten extent */
62 +#define EXT4_GET_BLOCKS_UNWRIT_EXT 0x0002
63 +#define EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT (EXT4_GET_BLOCKS_UNWRIT_EXT|\
64 EXT4_GET_BLOCKS_CREATE)
65 /* Caller is from the delayed allocation writeout path
66 * finally doing the actual allocation of delayed blocks */
67 #define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004
68 /* caller is from the direct IO path, request to creation of an
69 - unitialized extents if not allocated, split the uninitialized
70 + unwritten extents if not allocated, split the unwritten
71 extent if blocks has been preallocated already*/
72 #define EXT4_GET_BLOCKS_PRE_IO 0x0008
73 #define EXT4_GET_BLOCKS_CONVERT 0x0010
74 #define EXT4_GET_BLOCKS_IO_CREATE_EXT (EXT4_GET_BLOCKS_PRE_IO|\
75 - EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
76 + EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT)
77 /* Convert extent to initialized after IO complete */
78 #define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\
79 - EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
80 + EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT)
81 /* Eventual metadata allocation (due to growing extent tree)
82 * should not fail, so try to use reserved blocks for that.*/
83 #define EXT4_GET_BLOCKS_METADATA_NOFAIL 0x0020
84 diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
85 index 5074fe2..a867f5c 100644
86 --- a/fs/ext4/ext4_extents.h
87 +++ b/fs/ext4/ext4_extents.h
88 @@ -137,21 +137,21 @@ struct ext4_ext_path {
89 * EXT_INIT_MAX_LEN is the maximum number of blocks we can have in an
90 * initialized extent. This is 2^15 and not (2^16 - 1), since we use the
91 * MSB of ee_len field in the extent datastructure to signify if this
92 - * particular extent is an initialized extent or an uninitialized (i.e.
93 + * particular extent is an initialized extent or an unwritten (i.e.
95 - * EXT_UNINIT_MAX_LEN is the maximum number of blocks we can have in an
96 - * uninitialized extent.
97 + * EXT_UNWRITTEN_MAX_LEN is the maximum number of blocks we can have in an
99 * If ee_len is <= 0x8000, it is an initialized extent. Otherwise, it is an
100 - * uninitialized one. In other words, if MSB of ee_len is set, it is an
101 - * uninitialized extent with only one special scenario when ee_len = 0x8000.
102 - * In this case we can not have an uninitialized extent of zero length and
103 + * unwritten one. In other words, if MSB of ee_len is set, it is an
104 + * unwritten extent with only one special scenario when ee_len = 0x8000.
105 + * In this case we can not have an unwritten extent of zero length and
106 * thus we make it as a special case of initialized extent with 0x8000 length.
107 * This way we get better extent-to-group alignment for initialized extents.
108 * Hence, the maximum number of blocks we can have in an *initialized*
109 - * extent is 2^15 (32768) and in an *uninitialized* extent is 2^15-1 (32767).
110 + * extent is 2^15 (32768) and in an *unwritten* extent is 2^15-1 (32767).
112 #define EXT_INIT_MAX_LEN (1UL << 15)
113 -#define EXT_UNINIT_MAX_LEN (EXT_INIT_MAX_LEN - 1)
114 +#define EXT_UNWRITTEN_MAX_LEN (EXT_INIT_MAX_LEN - 1)
117 #define EXT_FIRST_EXTENT(__hdr__) \
118 @@ -187,14 +187,14 @@ static inline unsigned short ext_depth(struct inode *inode)
119 return le16_to_cpu(ext_inode_hdr(inode)->eh_depth);
122 -static inline void ext4_ext_mark_uninitialized(struct ext4_extent *ext)
123 +static inline void ext4_ext_mark_unwritten(struct ext4_extent *ext)
125 - /* We can not have an uninitialized extent of zero length! */
126 + /* We can not have an unwritten extent of zero length! */
127 BUG_ON((le16_to_cpu(ext->ee_len) & ~EXT_INIT_MAX_LEN) == 0);
128 ext->ee_len |= cpu_to_le16(EXT_INIT_MAX_LEN);
131 -static inline int ext4_ext_is_uninitialized(struct ext4_extent *ext)
132 +static inline int ext4_ext_is_unwritten(struct ext4_extent *ext)
134 /* Extent with ee_len of 0x8000 is treated as an initialized extent */
135 return (le16_to_cpu(ext->ee_len) > EXT_INIT_MAX_LEN);
136 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
137 index e67afdd..7252c0f 100644
138 --- a/fs/ext4/extents.c
139 +++ b/fs/ext4/extents.c
142 #define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \
144 -#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
145 -#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
146 +#define EXT4_EXT_MARK_UNWRIT1 0x2 /* mark first half unwritten */
147 +#define EXT4_EXT_MARK_UNWRIT2 0x4 /* mark second half unwritten */
149 #define EXT4_EXT_DATA_VALID1 0x8 /* first half contains valid data */
150 #define EXT4_EXT_DATA_VALID2 0x10 /* second half contains valid data */
151 @@ -525,7 +525,7 @@ __read_extent_tree_block(const char *function, unsigned int line,
155 - if (ext4_ext_is_uninitialized(ex))
156 + if (ext4_ext_is_unwritten(ex))
157 status = EXTENT_STATUS_UNWRITTEN;
158 ext4_es_cache_extent(inode, lblk, len,
159 ext4_ext_pblock(ex), status);
160 @@ -621,7 +621,7 @@ static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
161 } else if (path->p_ext) {
162 ext_debug(" %d:[%d]%d:%llu ",
163 le32_to_cpu(path->p_ext->ee_block),
164 - ext4_ext_is_uninitialized(path->p_ext),
165 + ext4_ext_is_unwritten(path->p_ext),
166 ext4_ext_get_actual_len(path->p_ext),
167 ext4_ext_pblock(path->p_ext));
169 @@ -647,7 +647,7 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
171 for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
172 ext_debug("%d:[%d]%d:%llu ", le32_to_cpu(ex->ee_block),
173 - ext4_ext_is_uninitialized(ex),
174 + ext4_ext_is_unwritten(ex),
175 ext4_ext_get_actual_len(ex), ext4_ext_pblock(ex));
178 @@ -678,7 +678,7 @@ static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
179 ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n",
180 le32_to_cpu(ex->ee_block),
182 - ext4_ext_is_uninitialized(ex),
183 + ext4_ext_is_unwritten(ex),
184 ext4_ext_get_actual_len(ex),
187 @@ -803,7 +803,7 @@ ext4_ext_binsearch(struct inode *inode,
188 ext_debug(" -> %d:%llu:[%d]%d ",
189 le32_to_cpu(path->p_ext->ee_block),
190 ext4_ext_pblock(path->p_ext),
191 - ext4_ext_is_uninitialized(path->p_ext),
192 + ext4_ext_is_unwritten(path->p_ext),
193 ext4_ext_get_actual_len(path->p_ext));
195 #ifdef CHECK_BINSEARCH
196 @@ -1687,11 +1687,11 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
199 * Make sure that both extents are initialized. We don't merge
200 - * uninitialized extents so that we can be sure that end_io code has
201 + * unwritten extents so that we can be sure that end_io code has
202 * the extent that was written properly split out and conversion to
203 * initialized is trivial.
205 - if (ext4_ext_is_uninitialized(ex1) != ext4_ext_is_uninitialized(ex2))
206 + if (ext4_ext_is_unwritten(ex1) != ext4_ext_is_unwritten(ex2))
209 ext1_ee_len = ext4_ext_get_actual_len(ex1);
210 @@ -1708,10 +1708,10 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
212 if (ext1_ee_len + ext2_ee_len > EXT_INIT_MAX_LEN)
214 - if (ext4_ext_is_uninitialized(ex1) &&
215 + if (ext4_ext_is_unwritten(ex1) &&
216 (ext4_test_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN) ||
217 atomic_read(&EXT4_I(inode)->i_unwritten) ||
218 - (ext1_ee_len + ext2_ee_len > EXT_UNINIT_MAX_LEN)))
219 + (ext1_ee_len + ext2_ee_len > EXT_UNWRITTEN_MAX_LEN)))
221 #ifdef AGGRESSIVE_TEST
222 if (ext1_ee_len >= 4)
223 @@ -1736,7 +1736,7 @@ static int ext4_ext_try_to_merge_right(struct inode *inode,
225 struct ext4_extent_header *eh;
226 unsigned int depth, len;
227 - int merge_done = 0, uninit;
228 + int merge_done = 0, unwritten;
230 depth = ext_depth(inode);
231 BUG_ON(path[depth].p_hdr == NULL);
232 @@ -1746,11 +1746,11 @@ static int ext4_ext_try_to_merge_right(struct inode *inode,
233 if (!ext4_can_extents_be_merged(inode, ex, ex + 1))
235 /* merge with next extent! */
236 - uninit = ext4_ext_is_uninitialized(ex);
237 + unwritten = ext4_ext_is_unwritten(ex);
238 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
239 + ext4_ext_get_actual_len(ex + 1));
241 - ext4_ext_mark_uninitialized(ex);
243 + ext4_ext_mark_unwritten(ex);
245 if (ex + 1 < EXT_LAST_EXTENT(eh)) {
246 len = (EXT_LAST_EXTENT(eh) - ex - 1)
247 @@ -1904,7 +1904,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
248 struct ext4_ext_path *npath = NULL;
251 - int mb_flags = 0, uninit;
252 + int mb_flags = 0, unwritten;
254 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
255 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
256 @@ -1944,21 +1944,21 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
257 if (ext4_can_extents_be_merged(inode, ex, newext)) {
258 ext_debug("append [%d]%d block to %u:[%d]%d"
260 - ext4_ext_is_uninitialized(newext),
261 + ext4_ext_is_unwritten(newext),
262 ext4_ext_get_actual_len(newext),
263 le32_to_cpu(ex->ee_block),
264 - ext4_ext_is_uninitialized(ex),
265 + ext4_ext_is_unwritten(ex),
266 ext4_ext_get_actual_len(ex),
267 ext4_ext_pblock(ex));
268 err = ext4_ext_get_access(handle, inode,
272 - uninit = ext4_ext_is_uninitialized(ex);
273 + unwritten = ext4_ext_is_unwritten(ex);
274 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
275 + ext4_ext_get_actual_len(newext));
277 - ext4_ext_mark_uninitialized(ex);
279 + ext4_ext_mark_unwritten(ex);
280 eh = path[depth].p_hdr;
283 @@ -1970,10 +1970,10 @@ prepend:
284 ext_debug("prepend %u[%d]%d block to %u:[%d]%d"
286 le32_to_cpu(newext->ee_block),
287 - ext4_ext_is_uninitialized(newext),
288 + ext4_ext_is_unwritten(newext),
289 ext4_ext_get_actual_len(newext),
290 le32_to_cpu(ex->ee_block),
291 - ext4_ext_is_uninitialized(ex),
292 + ext4_ext_is_unwritten(ex),
293 ext4_ext_get_actual_len(ex),
294 ext4_ext_pblock(ex));
295 err = ext4_ext_get_access(handle, inode,
296 @@ -1981,13 +1981,13 @@ prepend:
300 - uninit = ext4_ext_is_uninitialized(ex);
301 + unwritten = ext4_ext_is_unwritten(ex);
302 ex->ee_block = newext->ee_block;
303 ext4_ext_store_pblock(ex, ext4_ext_pblock(newext));
304 ex->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ex)
305 + ext4_ext_get_actual_len(newext));
307 - ext4_ext_mark_uninitialized(ex);
309 + ext4_ext_mark_unwritten(ex);
310 eh = path[depth].p_hdr;
313 @@ -2047,7 +2047,7 @@ has_space:
314 ext_debug("first extent in the leaf: %u:%llu:[%d]%d\n",
315 le32_to_cpu(newext->ee_block),
316 ext4_ext_pblock(newext),
317 - ext4_ext_is_uninitialized(newext),
318 + ext4_ext_is_unwritten(newext),
319 ext4_ext_get_actual_len(newext));
320 nearex = EXT_FIRST_EXTENT(eh);
322 @@ -2058,7 +2058,7 @@ has_space:
324 le32_to_cpu(newext->ee_block),
325 ext4_ext_pblock(newext),
326 - ext4_ext_is_uninitialized(newext),
327 + ext4_ext_is_unwritten(newext),
328 ext4_ext_get_actual_len(newext),
331 @@ -2069,7 +2069,7 @@ has_space:
333 le32_to_cpu(newext->ee_block),
334 ext4_ext_pblock(newext),
335 - ext4_ext_is_uninitialized(newext),
336 + ext4_ext_is_unwritten(newext),
337 ext4_ext_get_actual_len(newext),
340 @@ -2079,7 +2079,7 @@ has_space:
341 "move %d extents from 0x%p to 0x%p\n",
342 le32_to_cpu(newext->ee_block),
343 ext4_ext_pblock(newext),
344 - ext4_ext_is_uninitialized(newext),
345 + ext4_ext_is_unwritten(newext),
346 ext4_ext_get_actual_len(newext),
347 len, nearex, nearex + 1);
348 memmove(nearex + 1, nearex,
349 @@ -2201,7 +2201,7 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
350 es.es_lblk = le32_to_cpu(ex->ee_block);
351 es.es_len = ext4_ext_get_actual_len(ex);
352 es.es_pblk = ext4_ext_pblock(ex);
353 - if (ext4_ext_is_uninitialized(ex))
354 + if (ext4_ext_is_unwritten(ex))
355 flags |= FIEMAP_EXTENT_UNWRITTEN;
358 @@ -2577,7 +2577,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
360 ext4_lblk_t ex_ee_block;
361 unsigned short ex_ee_len;
362 - unsigned uninitialized = 0;
363 + unsigned unwritten = 0;
364 struct ext4_extent *ex;
367 @@ -2624,13 +2624,13 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
368 while (ex >= EXT_FIRST_EXTENT(eh) &&
369 ex_ee_block + ex_ee_len > start) {
371 - if (ext4_ext_is_uninitialized(ex))
373 + if (ext4_ext_is_unwritten(ex))
379 ext_debug("remove ext %u:[%d]%d\n", ex_ee_block,
380 - uninitialized, ex_ee_len);
381 + unwritten, ex_ee_len);
382 path[depth].p_ext = ex;
384 a = ex_ee_block > start ? ex_ee_block : start;
385 @@ -2702,11 +2702,11 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
387 ex->ee_len = cpu_to_le16(num);
389 - * Do not mark uninitialized if all the blocks in the
390 + * Do not mark unwritten if all the blocks in the
391 * extent have been removed.
393 - if (uninitialized && num)
394 - ext4_ext_mark_uninitialized(ex);
395 + if (unwritten && num)
396 + ext4_ext_mark_unwritten(ex);
398 * If the extent was completely released,
399 * we need to remove it from the leaf
400 @@ -2855,9 +2855,9 @@ again:
401 end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
404 - if (ext4_ext_is_uninitialized(ex))
405 - split_flag = EXT4_EXT_MARK_UNINIT1 |
406 - EXT4_EXT_MARK_UNINIT2;
407 + if (ext4_ext_is_unwritten(ex))
408 + split_flag = EXT4_EXT_MARK_UNWRIT1 |
409 + EXT4_EXT_MARK_UNWRIT2;
412 * Split the extent in two so that 'end' is the last
413 @@ -3114,7 +3114,7 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
414 * @path: the path to the extent
415 * @split: the logical block where the extent is splitted.
416 * @split_flags: indicates if the extent could be zeroout if split fails, and
417 - * the states(init or uninit) of new extents.
418 + * the states(init or unwritten) of new extents.
419 * @flags: flags used to insert new extent to extent tree.
422 @@ -3156,10 +3156,10 @@ static int ext4_split_extent_at(handle_t *handle,
423 newblock = split - ee_block + ext4_ext_pblock(ex);
425 BUG_ON(split < ee_block || split >= (ee_block + ee_len));
426 - BUG_ON(!ext4_ext_is_uninitialized(ex) &&
427 + BUG_ON(!ext4_ext_is_unwritten(ex) &&
428 split_flag & (EXT4_EXT_MAY_ZEROOUT |
429 - EXT4_EXT_MARK_UNINIT1 |
430 - EXT4_EXT_MARK_UNINIT2));
431 + EXT4_EXT_MARK_UNWRIT1 |
432 + EXT4_EXT_MARK_UNWRIT2));
434 err = ext4_ext_get_access(handle, inode, path + depth);
436 @@ -3171,8 +3171,8 @@ static int ext4_split_extent_at(handle_t *handle,
437 * then we just change the state of the extent, and splitting
440 - if (split_flag & EXT4_EXT_MARK_UNINIT2)
441 - ext4_ext_mark_uninitialized(ex);
442 + if (split_flag & EXT4_EXT_MARK_UNWRIT2)
443 + ext4_ext_mark_unwritten(ex);
445 ext4_ext_mark_initialized(ex);
447 @@ -3186,8 +3186,8 @@ static int ext4_split_extent_at(handle_t *handle,
449 memcpy(&orig_ex, ex, sizeof(orig_ex));
450 ex->ee_len = cpu_to_le16(split - ee_block);
451 - if (split_flag & EXT4_EXT_MARK_UNINIT1)
452 - ext4_ext_mark_uninitialized(ex);
453 + if (split_flag & EXT4_EXT_MARK_UNWRIT1)
454 + ext4_ext_mark_unwritten(ex);
457 * path may lead to new leaf, not to original leaf any more
458 @@ -3201,8 +3201,8 @@ static int ext4_split_extent_at(handle_t *handle,
459 ex2->ee_block = cpu_to_le32(split);
460 ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block));
461 ext4_ext_store_pblock(ex2, newblock);
462 - if (split_flag & EXT4_EXT_MARK_UNINIT2)
463 - ext4_ext_mark_uninitialized(ex2);
464 + if (split_flag & EXT4_EXT_MARK_UNWRIT2)
465 + ext4_ext_mark_unwritten(ex2);
467 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags);
468 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
469 @@ -3279,7 +3279,7 @@ static int ext4_split_extent(handle_t *handle,
470 struct ext4_extent *ex;
471 unsigned int ee_len, depth;
475 int split_flag1, flags1;
476 int allocated = map->m_len;
478 @@ -3287,14 +3287,14 @@ static int ext4_split_extent(handle_t *handle,
479 ex = path[depth].p_ext;
480 ee_block = le32_to_cpu(ex->ee_block);
481 ee_len = ext4_ext_get_actual_len(ex);
482 - uninitialized = ext4_ext_is_uninitialized(ex);
483 + unwritten = ext4_ext_is_unwritten(ex);
485 if (map->m_lblk + map->m_len < ee_block + ee_len) {
486 split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT;
487 flags1 = flags | EXT4_GET_BLOCKS_PRE_IO;
489 - split_flag1 |= EXT4_EXT_MARK_UNINIT1 |
490 - EXT4_EXT_MARK_UNINIT2;
492 + split_flag1 |= EXT4_EXT_MARK_UNWRIT1 |
493 + EXT4_EXT_MARK_UNWRIT2;
494 if (split_flag & EXT4_EXT_DATA_VALID2)
495 split_flag1 |= EXT4_EXT_DATA_VALID1;
496 err = ext4_split_extent_at(handle, inode, path,
497 @@ -3319,15 +3319,15 @@ static int ext4_split_extent(handle_t *handle,
498 (unsigned long) map->m_lblk);
501 - uninitialized = ext4_ext_is_uninitialized(ex);
502 + unwritten = ext4_ext_is_unwritten(ex);
505 if (map->m_lblk >= ee_block) {
506 split_flag1 = split_flag & EXT4_EXT_DATA_VALID2;
507 - if (uninitialized) {
508 - split_flag1 |= EXT4_EXT_MARK_UNINIT1;
510 + split_flag1 |= EXT4_EXT_MARK_UNWRIT1;
511 split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
512 - EXT4_EXT_MARK_UNINIT2);
513 + EXT4_EXT_MARK_UNWRIT2);
515 err = ext4_split_extent_at(handle, inode, path,
516 map->m_lblk, split_flag1, flags);
517 @@ -3342,16 +3342,16 @@ out:
520 * This function is called by ext4_ext_map_blocks() if someone tries to write
521 - * to an uninitialized extent. It may result in splitting the uninitialized
522 + * to an unwritten extent. It may result in splitting the unwritten
523 * extent into multiple extents (up to three - one initialized and two
526 * There are three possibilities:
527 * a> There is no split required: Entire extent should be initialized
528 * b> Splits in two extents: Write is happening at either end of the extent
529 * c> Splits in three extents: Somone is writing in middle of the extent
532 - * - The extent pointed to by 'path' is uninitialized.
533 + * - The extent pointed to by 'path' is unwritten.
534 * - The extent pointed to by 'path' contains a superset
535 * of the logical span [map->m_lblk, map->m_lblk + map->m_len).
537 @@ -3397,12 +3397,12 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
538 trace_ext4_ext_convert_to_initialized_enter(inode, map, ex);
541 - BUG_ON(!ext4_ext_is_uninitialized(ex));
542 + BUG_ON(!ext4_ext_is_unwritten(ex));
543 BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
546 * Attempt to transfer newly initialized blocks from the currently
547 - * uninitialized extent to its neighbor. This is much cheaper
548 + * unwritten extent to its neighbor. This is much cheaper
549 * than an insertion followed by a merge as those involve costly
550 * memmove() calls. Transferring to the left is the common case in
551 * steady state for workloads doing fallocate(FALLOC_FL_KEEP_SIZE)
552 @@ -3438,7 +3438,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
553 * - C4: abut_ex can receive the additional blocks without
554 * overflowing the (initialized) length limit.
556 - if ((!ext4_ext_is_uninitialized(abut_ex)) && /*C1*/
557 + if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/
558 ((prev_lblk + prev_len) == ee_block) && /*C2*/
559 ((prev_pblk + prev_len) == ee_pblk) && /*C3*/
560 (prev_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
561 @@ -3453,7 +3453,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
562 ex->ee_block = cpu_to_le32(ee_block + map_len);
563 ext4_ext_store_pblock(ex, ee_pblk + map_len);
564 ex->ee_len = cpu_to_le16(ee_len - map_len);
565 - ext4_ext_mark_uninitialized(ex); /* Restore the flag */
566 + ext4_ext_mark_unwritten(ex); /* Restore the flag */
568 /* Extend abut_ex by 'map_len' blocks */
569 abut_ex->ee_len = cpu_to_le16(prev_len + map_len);
570 @@ -3484,7 +3484,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
571 * - C4: abut_ex can receive the additional blocks without
572 * overflowing the (initialized) length limit.
574 - if ((!ext4_ext_is_uninitialized(abut_ex)) && /*C1*/
575 + if ((!ext4_ext_is_unwritten(abut_ex)) && /*C1*/
576 ((map->m_lblk + map_len) == next_lblk) && /*C2*/
577 ((ee_pblk + ee_len) == next_pblk) && /*C3*/
578 (next_len < (EXT_INIT_MAX_LEN - map_len))) { /*C4*/
579 @@ -3499,7 +3499,7 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
580 abut_ex->ee_block = cpu_to_le32(next_lblk - map_len);
581 ext4_ext_store_pblock(abut_ex, next_pblk - map_len);
582 ex->ee_len = cpu_to_le16(ee_len - map_len);
583 - ext4_ext_mark_uninitialized(ex); /* Restore the flag */
584 + ext4_ext_mark_unwritten(ex); /* Restore the flag */
586 /* Extend abut_ex by 'map_len' blocks */
587 abut_ex->ee_len = cpu_to_le16(next_len + map_len);
588 @@ -3604,26 +3604,26 @@ out:
590 * This function is called by ext4_ext_map_blocks() from
591 * ext4_get_blocks_dio_write() when DIO to write
592 - * to an uninitialized extent.
593 + * to an unwritten extent.
595 - * Writing to an uninitialized extent may result in splitting the uninitialized
596 - * extent into multiple initialized/uninitialized extents (up to three)
597 + * Writing to an unwritten extent may result in splitting the unwritten
598 + * extent into multiple initialized/unwritten extents (up to three)
599 * There are three possibilities:
600 - * a> There is no split required: Entire extent should be uninitialized
601 + * a> There is no split required: Entire extent should be unwritten
602 * b> Splits in two extents: Write is happening at either end of the extent
603 * c> Splits in three extents: Somone is writing in middle of the extent
605 * This works the same way in the case of initialized -> unwritten conversion.
607 * One of more index blocks maybe needed if the extent tree grow after
608 - * the uninitialized extent split. To prevent ENOSPC occur at the IO
609 - * complete, we need to split the uninitialized extent before DIO submit
610 - * the IO. The uninitialized extent called at this time will be split
611 - * into three uninitialized extent(at most). After IO complete, the part
612 + * the unwritten extent split. To prevent ENOSPC occur at the IO
613 + * complete, we need to split the unwritten extent before DIO submit
614 + * the IO. The unwritten extent called at this time will be split
615 + * into three unwritten extent(at most). After IO complete, the part
616 * being filled will be convert to initialized by the end_io callback function
617 * via ext4_convert_unwritten_extents().
619 - * Returns the size of uninitialized extent to be written on success.
620 + * Returns the size of unwritten extent to be written on success.
622 static int ext4_split_convert_extents(handle_t *handle,
624 @@ -3661,7 +3661,7 @@ static int ext4_split_convert_extents(handle_t *handle,
625 } else if (flags & EXT4_GET_BLOCKS_CONVERT) {
626 split_flag |= ee_block + ee_len <= eof_block ?
627 EXT4_EXT_MAY_ZEROOUT : 0;
628 - split_flag |= (EXT4_EXT_MARK_UNINIT2 | EXT4_EXT_DATA_VALID2);
629 + split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
631 flags |= EXT4_GET_BLOCKS_PRE_IO;
632 return ext4_split_extent(handle, inode, path, map, split_flag, flags);
633 @@ -3711,8 +3711,8 @@ static int ext4_convert_initialized_extents(handle_t *handle,
634 err = ext4_ext_get_access(handle, inode, path + depth);
637 - /* first mark the extent as uninitialized */
638 - ext4_ext_mark_uninitialized(ex);
639 + /* first mark the extent as unwritten */
640 + ext4_ext_mark_unwritten(ex);
642 /* note: ext4_ext_correct_indexes() isn't needed here because
643 * borders are not changed
644 @@ -3972,10 +3972,10 @@ ext4_ext_convert_initialized_extent(handle_t *handle, struct inode *inode,
647 * Make sure that the extent is no bigger than we support with
648 - * uninitialized extent
651 - if (map->m_len > EXT_UNINIT_MAX_LEN)
652 - map->m_len = EXT_UNINIT_MAX_LEN / 2;
653 + if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
654 + map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
656 ret = ext4_convert_initialized_extents(handle, inode, map,
658 @@ -3994,7 +3994,7 @@ ext4_ext_convert_initialized_extent(handle_t *handle, struct inode *inode,
662 -ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
663 +ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
664 struct ext4_map_blocks *map,
665 struct ext4_ext_path *path, int flags,
666 unsigned int allocated, ext4_fsblk_t newblock)
667 @@ -4003,19 +4003,19 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
669 ext4_io_end_t *io = ext4_inode_aio(inode);
671 - ext_debug("ext4_ext_handle_uninitialized_extents: inode %lu, logical "
672 + ext_debug("ext4_ext_handle_unwritten_extents: inode %lu, logical "
673 "block %llu, max_blocks %u, flags %x, allocated %u\n",
674 inode->i_ino, (unsigned long long)map->m_lblk, map->m_len,
676 ext4_ext_show_leaf(inode, path);
679 - * When writing into uninitialized space, we should not fail to
680 + * When writing into unwritten space, we should not fail to
681 * allocate metadata blocks for the new extent block if needed.
683 flags |= EXT4_GET_BLOCKS_METADATA_NOFAIL;
685 - trace_ext4_ext_handle_uninitialized_extents(inode, map, flags,
686 + trace_ext4_ext_handle_unwritten_extents(inode, map, flags,
687 allocated, newblock);
689 /* get_block() before submit the IO, split the extent */
690 @@ -4058,7 +4058,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
691 * repeat fallocate creation request
692 * we already have an unwritten extent
694 - if (flags & EXT4_GET_BLOCKS_UNINIT_EXT) {
695 + if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT) {
696 map->m_flags |= EXT4_MAP_UNWRITTEN;
699 @@ -4309,7 +4309,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
703 - * Uninitialized extents are treated as holes, except that
704 + * unwritten extents are treated as holes, except that
705 * we split out initialized portions during a write.
707 ee_len = ext4_ext_get_actual_len(ex);
708 @@ -4328,16 +4328,16 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
709 * If the extent is initialized check whether the
710 * caller wants to convert it to unwritten.
712 - if ((!ext4_ext_is_uninitialized(ex)) &&
713 + if ((!ext4_ext_is_unwritten(ex)) &&
714 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
715 allocated = ext4_ext_convert_initialized_extent(
716 handle, inode, map, path, flags,
717 allocated, newblock);
719 - } else if (!ext4_ext_is_uninitialized(ex))
720 + } else if (!ext4_ext_is_unwritten(ex))
723 - ret = ext4_ext_handle_uninitialized_extents(
724 + ret = ext4_ext_handle_unwritten_extents(
725 handle, inode, map, path, flags,
726 allocated, newblock);
728 @@ -4409,15 +4409,15 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
730 * See if request is beyond maximum number of blocks we can have in
731 * a single extent. For an initialized extent this limit is
732 - * EXT_INIT_MAX_LEN and for an uninitialized extent this limit is
733 - * EXT_UNINIT_MAX_LEN.
734 + * EXT_INIT_MAX_LEN and for an unwritten extent this limit is
735 + * EXT_UNWRITTEN_MAX_LEN.
737 if (map->m_len > EXT_INIT_MAX_LEN &&
738 - !(flags & EXT4_GET_BLOCKS_UNINIT_EXT))
739 + !(flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
740 map->m_len = EXT_INIT_MAX_LEN;
741 - else if (map->m_len > EXT_UNINIT_MAX_LEN &&
742 - (flags & EXT4_GET_BLOCKS_UNINIT_EXT))
743 - map->m_len = EXT_UNINIT_MAX_LEN;
744 + else if (map->m_len > EXT_UNWRITTEN_MAX_LEN &&
745 + (flags & EXT4_GET_BLOCKS_UNWRIT_EXT))
746 + map->m_len = EXT_UNWRITTEN_MAX_LEN;
748 /* Check if we can really insert (m_lblk)::(m_lblk + m_len) extent */
749 newex.ee_len = cpu_to_le16(map->m_len);
750 @@ -4465,13 +4465,13 @@ got_allocated_blocks:
751 /* try to insert new extent into found leaf and return */
752 ext4_ext_store_pblock(&newex, newblock + offset);
753 newex.ee_len = cpu_to_le16(ar.len);
754 - /* Mark uninitialized */
755 - if (flags & EXT4_GET_BLOCKS_UNINIT_EXT){
756 - ext4_ext_mark_uninitialized(&newex);
757 + /* Mark unwritten */
758 + if (flags & EXT4_GET_BLOCKS_UNWRIT_EXT){
759 + ext4_ext_mark_unwritten(&newex);
760 map->m_flags |= EXT4_MAP_UNWRITTEN;
762 * io_end structure was created for every IO write to an
763 - * uninitialized extent. To avoid unnecessary conversion,
764 + * unwritten extent. To avoid unnecessary conversion,
765 * here we flag the IO that really needs the conversion.
766 * For non asycn direct IO case, flag the inode state
767 * that we need to perform conversion when IO is done.
768 @@ -4604,9 +4604,9 @@ got_allocated_blocks:
771 * Cache the extent and update transaction to commit on fdatasync only
772 - * when it is _not_ an uninitialized extent.
773 + * when it is _not_ an unwritten extent.
775 - if ((flags & EXT4_GET_BLOCKS_UNINIT_EXT) == 0)
776 + if ((flags & EXT4_GET_BLOCKS_UNWRIT_EXT) == 0)
777 ext4_update_inode_fsync_trans(handle, inode, 1);
779 ext4_update_inode_fsync_trans(handle, inode, 0);
780 @@ -4680,7 +4680,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
781 * that it doesn't get unnecessarily split into multiple
784 - if (len <= EXT_UNINIT_MAX_LEN)
785 + if (len <= EXT_UNWRITTEN_MAX_LEN)
786 flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
789 @@ -4769,7 +4769,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
793 - flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT |
794 + flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |
795 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN;
796 if (mode & FALLOC_FL_KEEP_SIZE)
797 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
798 @@ -4925,7 +4925,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
799 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits)
802 - flags = EXT4_GET_BLOCKS_CREATE_UNINIT_EXT;
803 + flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT;
804 if (mode & FALLOC_FL_KEEP_SIZE)
805 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
807 diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
808 index 0ebc212..98c90f5 100644
809 --- a/fs/ext4/extents_status.c
810 +++ b/fs/ext4/extents_status.c
811 @@ -433,7 +433,7 @@ static void ext4_es_insert_extent_ext_check(struct inode *inode,
812 ee_start = ext4_ext_pblock(ex);
813 ee_len = ext4_ext_get_actual_len(ex);
815 - ee_status = ext4_ext_is_uninitialized(ex) ? 1 : 0;
816 + ee_status = ext4_ext_is_unwritten(ex) ? 1 : 0;
817 es_status = ext4_es_is_unwritten(es) ? 1 : 0;
820 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
821 index bc76559..25a20b6 100644
824 @@ -136,7 +136,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
826 * 'err==len' means that all of blocks has been preallocated no
827 * matter they are initialized or not. For excluding
828 - * uninitialized extents, we need to check m_flags. There are
829 + * unwritten extents, we need to check m_flags. There are
830 * two conditions that indicate for initialized extents.
831 * 1) If we hit extent cache, EXT4_MAP_MAPPED flag is returned;
832 * 2) If we do a real lookup, non-flags are returned.
833 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
834 index 638f21d..2eb5fad 100644
835 --- a/fs/ext4/inode.c
836 +++ b/fs/ext4/inode.c
837 @@ -489,8 +489,8 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
838 * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
841 - * On success, it returns the number of blocks being mapped or allocate.
842 - * if create==0 and the blocks are pre-allocated and uninitialized block,
843 + * On success, it returns the number of blocks being mapped or allocated.
844 + * if create==0 and the blocks are pre-allocated and unwritten,
845 * the result buffer head is unmapped. If the create ==1, it will make sure
846 * the buffer head is mapped.
848 @@ -622,7 +622,7 @@ found:
849 map->m_flags &= ~EXT4_MAP_FLAGS;
852 - * New blocks allocate and/or writing to uninitialized extent
853 + * New blocks allocate and/or writing to unwritten extent
854 * will possibly result in updating i_data, so we take
855 * the write lock of i_data_sem, and call get_blocks()
856 * with create == 1 flag.
857 @@ -2032,7 +2032,7 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
858 * Scan buffers corresponding to changed extent (we expect corresponding pages
859 * to be already locked) and update buffer state according to new extent state.
860 * We map delalloc buffers to their physical location, clear unwritten bits,
861 - * and mark buffers as uninit when we perform writes to uninitialized extents
862 + * and mark buffers as uninit when we perform writes to unwritten extents
863 * and do extent conversion after IO is finished. If the last page is not fully
864 * mapped, we update @map to the next extent in the last page that needs
865 * mapping. Otherwise we submit the page for IO.
866 @@ -2131,7 +2131,7 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
867 trace_ext4_da_write_pages_extent(inode, map);
869 * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
870 - * to convert an uninitialized extent to be initialized (in the case
871 + * to convert an unwritten extent to be initialized (in the case
872 * where we have written into one or more preallocated blocks). It is
873 * possible that we're going to need more metadata blocks than
874 * previously reserved. However we must not fail because we're in
875 @@ -3071,9 +3071,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
876 * preallocated extents, and those write extend the file, no need to
877 * fall back to buffered IO.
879 - * For holes, we fallocate those blocks, mark them as uninitialized
880 + * For holes, we fallocate those blocks, mark them as unwritten
881 * If those blocks were preallocated, we mark sure they are split, but
882 - * still keep the range to write as uninitialized.
883 + * still keep the range to write as unwritten.
885 * The unwritten extents will be converted to written when DIO is completed.
886 * For async direct IO, since the IO may still pending when return, we
887 @@ -3125,12 +3125,12 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
888 * We could direct write to holes and fallocate.
890 * Allocated blocks to fill the hole are marked as
891 - * uninitialized to prevent parallel buffered read to expose
892 + * unwritten to prevent parallel buffered read to expose
893 * the stale data before DIO complete the data IO.
895 * As to previously fallocated extents, ext4 get_block will
896 * just simply mark the buffer mapped but still keep the
897 - * extents uninitialized.
898 + * extents unwritten.
900 * For non AIO case, we will convert those unwritten extents
901 * to written after return back from blockdev_direct_IO.
902 diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
903 index 58ee7dc..1b809fe 100644
904 --- a/fs/ext4/move_extent.c
905 +++ b/fs/ext4/move_extent.c
906 @@ -57,8 +57,8 @@ get_ext_path(struct inode *inode, ext4_lblk_t lblock,
908 copy_extent_status(struct ext4_extent *src, struct ext4_extent *dest)
910 - if (ext4_ext_is_uninitialized(src))
911 - ext4_ext_mark_uninitialized(dest);
912 + if (ext4_ext_is_unwritten(src))
913 + ext4_ext_mark_unwritten(dest);
915 dest->ee_len = cpu_to_le16(ext4_ext_get_actual_len(dest));
917 @@ -593,14 +593,14 @@ mext_calc_swap_extents(struct ext4_extent *tmp_dext,
918 * @inode: inode in question
919 * @from: block offset of inode
920 * @count: block count to be checked
921 - * @uninit: extents expected to be uninitialized
922 + * @unwritten: extents expected to be unwritten
923 * @err: pointer to save error value
925 * Return 1 if all extents in range has expected type, and zero otherwise.
928 mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
929 - int uninit, int *err)
930 + int unwritten, int *err)
932 struct ext4_ext_path *path = NULL;
933 struct ext4_extent *ext;
934 @@ -611,7 +611,7 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
937 ext = path[ext_depth(inode)].p_ext;
938 - if (uninit != ext4_ext_is_uninitialized(ext))
939 + if (unwritten != ext4_ext_is_unwritten(ext))
941 from += ext4_ext_get_actual_len(ext);
942 ext4_ext_drop_refs(path);
943 @@ -894,7 +894,7 @@ out:
944 * @orig_page_offset: page index on original file
945 * @data_offset_in_page: block index where data swapping starts
946 * @block_len_in_page: the number of blocks to be swapped
947 - * @uninit: orig extent is uninitialized or not
948 + * @unwritten: orig extent is unwritten or not
949 * @err: pointer to save return value
951 * Save the data in original inode blocks and replace original inode extents
952 @@ -905,7 +905,7 @@ out:
954 move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
955 pgoff_t orig_page_offset, int data_offset_in_page,
956 - int block_len_in_page, int uninit, int *err)
957 + int block_len_in_page, int unwritten, int *err)
959 struct inode *orig_inode = file_inode(o_filp);
960 struct page *pagep[2] = {NULL, NULL};
961 @@ -962,27 +962,27 @@ again:
962 if (unlikely(*err < 0))
965 - * If orig extent was uninitialized it can become initialized
966 + * If orig extent was unwritten it can become initialized
967 * at any time after i_data_sem was dropped, in order to
968 * serialize with delalloc we have recheck extent while we
969 * hold page's lock, if it is still the case data copy is not
970 * necessary, just swap data blocks between orig and donor.
974 ext4_double_down_write_data_sem(orig_inode, donor_inode);
975 /* If any of extents in range became initialized we have to
976 * fallback to data copying */
977 - uninit = mext_check_coverage(orig_inode, orig_blk_offset,
978 - block_len_in_page, 1, err);
979 + unwritten = mext_check_coverage(orig_inode, orig_blk_offset,
980 + block_len_in_page, 1, err);
984 - uninit &= mext_check_coverage(donor_inode, orig_blk_offset,
985 - block_len_in_page, 1, err);
986 + unwritten &= mext_check_coverage(donor_inode, orig_blk_offset,
987 + block_len_in_page, 1, err);
993 ext4_double_up_write_data_sem(orig_inode, donor_inode);
996 @@ -1259,7 +1259,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
997 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
998 int data_offset_in_page;
999 int block_len_in_page;
1003 if (orig_inode->i_sb != donor_inode->i_sb) {
1004 ext4_debug("ext4 move extent: The argument files "
1005 @@ -1391,8 +1391,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
1009 - /* Is original extent is uninitialized */
1010 - uninit = ext4_ext_is_uninitialized(ext_prev);
1011 + /* Is original extent unwritten */
1012 + unwritten = ext4_ext_is_unwritten(ext_prev);
1014 data_offset_in_page = seq_start % blocks_per_page;
1016 @@ -1432,8 +1432,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
1017 o_filp, donor_inode,
1019 data_offset_in_page,
1020 - block_len_in_page, uninit,
1022 + block_len_in_page,
1025 /* Count how many blocks we have exchanged */
1026 *moved_len += block_len_in_page;
1027 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1028 index 6f9e6fa..c4895c1 100644
1029 --- a/fs/ext4/super.c
1030 +++ b/fs/ext4/super.c
1031 @@ -3337,7 +3337,7 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb)
1032 * By default we reserve 2% or 4096 clusters, whichever is smaller.
1033 * This should cover the situations where we can not afford to run
1034 * out of space like for example punch hole, or converting
1035 - * uninitialized extents in delalloc path. In most cases such
1036 + * unwritten extents in delalloc path. In most cases such
1037 * allocation would require 1, or 2 blocks, higher numbers are
1040 diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
1041 index be1c889..683cd75 100644
1042 --- a/include/trace/events/ext4.h
1043 +++ b/include/trace/events/ext4.h
1044 @@ -45,7 +45,7 @@ struct extent_status;
1046 #define show_map_flags(flags) __print_flags(flags, "|", \
1047 { EXT4_GET_BLOCKS_CREATE, "CREATE" }, \
1048 - { EXT4_GET_BLOCKS_UNINIT_EXT, "UNINIT" }, \
1049 + { EXT4_GET_BLOCKS_UNWRIT_EXT, "UNWRIT" }, \
1050 { EXT4_GET_BLOCKS_DELALLOC_RESERVE, "DELALLOC" }, \
1051 { EXT4_GET_BLOCKS_PRE_IO, "PRE_IO" }, \
1052 { EXT4_GET_BLOCKS_CONVERT, "CONVERT" }, \
1053 @@ -1505,7 +1505,7 @@ DEFINE_EVENT(ext4__truncate, ext4_truncate_exit,
1057 -/* 'ux' is the uninitialized extent. */
1058 +/* 'ux' is the unwritten extent. */
1059 TRACE_EVENT(ext4_ext_convert_to_initialized_enter,
1060 TP_PROTO(struct inode *inode, struct ext4_map_blocks *map,
1061 struct ext4_extent *ux),
1062 @@ -1541,7 +1541,7 @@ TRACE_EVENT(ext4_ext_convert_to_initialized_enter,
1066 - * 'ux' is the uninitialized extent.
1067 + * 'ux' is the unwritten extent.
1068 * 'ix' is the initialized extent to which blocks are transferred.
1070 TRACE_EVENT(ext4_ext_convert_to_initialized_fastpath,
1071 @@ -1819,7 +1819,7 @@ DEFINE_EVENT(ext4__trim, ext4_trim_all_free,
1072 TP_ARGS(sb, group, start, len)
1075 -TRACE_EVENT(ext4_ext_handle_uninitialized_extents,
1076 +TRACE_EVENT(ext4_ext_handle_unwritten_extents,
1077 TP_PROTO(struct inode *inode, struct ext4_map_blocks *map, int flags,
1078 unsigned int allocated, ext4_fsblk_t newblock),