1 ext4: prepare to drop EXT4_STATE_DELALLOC_RESERVED
3 The EXT4_STATE_DELALLOC_RESERVED flag was originally implemented
4 because it was too hard to make sure the mballoc and get_block flags
5 could be reliably passed down through all of the codepaths that end up
6 calling ext4_mb_new_blocks().
8 Since then, we have mb_flags passed down through most of the code
9 paths, so getting rid of EXT4_STATE_DELALLOC_RESERVED isn't as tricky
10 as it used to be.
12 This commit plumbs in the last of what is required, and then adds a
13 WARN_ON check to make sure we haven't missed anything. If this passes
14 a full regression test run, we can then drop
15 EXT4_STATE_DELALLOC_RESERVED.
17 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
18 Reviewed-by: Jan Kara <jack@suse.cz>
20 fs/ext4/balloc.c | 3 +--
21 fs/ext4/extents.c | 6 +++++-
22 fs/ext4/indirect.c | 6 +++++-
23 fs/ext4/mballoc.c | 10 ++++++----
24 fs/ext4/xattr.c | 6 ------
25 5 files changed, 17 insertions(+), 14 deletions(-)
27 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
28 index 581ef40..d70f154 100644
29 --- a/fs/ext4/balloc.c
30 +++ b/fs/ext4/balloc.c
31 @@ -636,8 +636,7 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
32 * Account for the allocated meta blocks. We will never
33 * fail EDQUOT for metdata, but we do account for it.
36 - ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
37 + if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
38 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
39 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
40 dquot_alloc_block_nofail(inode,
41 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
42 index 3ac1686..8170b32 100644
43 --- a/fs/ext4/extents.c
44 +++ b/fs/ext4/extents.c
45 @@ -1933,6 +1933,8 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
47 int mb_flags = 0, unwritten;
49 + if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
50 + mb_flags |= EXT4_MB_DELALLOC_RESERVED;
51 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
52 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
54 @@ -2054,7 +2056,7 @@ prepend:
55 * We're gonna add a new leaf in the tree.
57 if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
58 - mb_flags = EXT4_MB_USE_RESERVED;
59 + mb_flags |= EXT4_MB_USE_RESERVED;
60 err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
63 @@ -4438,6 +4440,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
65 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
66 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
67 + if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
68 + ar.flags |= EXT4_MB_DELALLOC_RESERVED;
69 newblock = ext4_mb_new_blocks(handle, &ar, &err);
72 diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
73 index 69af0cd..36b3696 100644
74 --- a/fs/ext4/indirect.c
75 +++ b/fs/ext4/indirect.c
76 @@ -333,7 +333,9 @@ static int ext4_alloc_branch(handle_t *handle,
77 new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
79 ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
80 - ar->inode, ar->goal, 0, NULL, &err);
81 + ar->inode, ar->goal,
82 + ar->flags & EXT4_MB_DELALLOC_RESERVED,
87 @@ -572,6 +574,8 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
88 ar.logical = map->m_lblk;
89 if (S_ISREG(inode->i_mode))
90 ar.flags = EXT4_MB_HINT_DATA;
91 + if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
92 + ar.flags |= EXT4_MB_DELALLOC_RESERVED;
94 ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
96 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
97 index 8b0f9ef..15dffda 100644
98 --- a/fs/ext4/mballoc.c
99 +++ b/fs/ext4/mballoc.c
100 @@ -4415,9 +4415,12 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
101 * EDQUOT check, as blocks and quotas have been already
102 * reserved when data being copied into pagecache.
104 - if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
105 + if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED)) {
106 + WARN_ON((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0);
107 ar->flags |= EXT4_MB_DELALLOC_RESERVED;
111 + if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
112 /* Without delayed allocation we need to verify
113 * there is enough free blocks to do block allocation
114 * and verify allocation doesn't exceed the quota limits.
115 @@ -4528,8 +4531,7 @@ out:
116 if (inquota && ar->len < inquota)
117 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
119 - if (!ext4_test_inode_state(ar->inode,
120 - EXT4_STATE_DELALLOC_RESERVED))
121 + if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
122 /* release all the reserved blocks if non delalloc */
123 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
125 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
126 index e738733..da4df70 100644
127 --- a/fs/ext4/xattr.c
128 +++ b/fs/ext4/xattr.c
129 @@ -899,14 +899,8 @@ inserted:
130 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
131 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
134 - * take i_data_sem because we will test
135 - * i_delalloc_reserved_flag in ext4_mb_new_blocks
137 - down_read(&EXT4_I(inode)->i_data_sem);
138 block = ext4_new_meta_blocks(handle, inode, goal, 0,
140 - up_read((&EXT4_I(inode)->i_data_sem));