1 ext4: remove metadata reservation checks
3 Commit 27dd43854227b ("ext4: introduce reserved space") reserves 2% of
4 the file system space to make sure metadata allocations will always
5 succeed. Given that, tracking the reservation of metadata blocks is
6 no longer necessary.
8 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
10 fs/ext4/balloc.c | 1 -
12 fs/ext4/extents.c | 3 +-
13 fs/ext4/inode.c | 128 +++--------------------------------------------------
14 fs/ext4/mballoc.c | 15 +------
15 5 files changed, 7 insertions(+), 141 deletions(-)
17 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
18 index 0762d14..807071d 100644
19 --- a/fs/ext4/balloc.c
20 +++ b/fs/ext4/balloc.c
21 @@ -623,7 +623,6 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
23 ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
24 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
25 - EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
26 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
27 dquot_alloc_block_nofail(inode,
28 EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
29 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
30 index 7cc5a0e..d35c78c 100644
33 @@ -591,7 +591,6 @@ enum {
34 #define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE 0x0008
35 #define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER 0x0010
36 #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER 0x0020
37 -#define EXT4_FREE_BLOCKS_RESERVE 0x0040
41 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
42 index 4da228a..b30172d 100644
43 --- a/fs/ext4/extents.c
44 +++ b/fs/ext4/extents.c
45 @@ -1808,8 +1808,7 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
48 ext4_free_blocks(handle, inode, NULL, blk, 1,
49 - EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET |
50 - EXT4_FREE_BLOCKS_RESERVE);
51 + EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
55 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
56 index 8a06473..027ee8c 100644
59 @@ -325,18 +325,6 @@ qsize_t *ext4_get_reserved_space(struct inode *inode)
63 - * Calculate the number of metadata blocks need to reserve
64 - * to allocate a block located at @lblock
66 -static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
68 - if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
69 - return ext4_ext_calc_metadata_amount(inode, lblock);
71 - return ext4_ind_calc_metadata_amount(inode, lblock);
75 * Called with i_data_sem down, which is important since we can call
76 * ext4_discard_preallocations() from here.
78 @@ -357,35 +345,10 @@ void ext4_da_update_reserve_space(struct inode *inode,
79 used = ei->i_reserved_data_blocks;
82 - if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
83 - ext4_warning(inode->i_sb, "ino %lu, allocated %d "
84 - "with only %d reserved metadata blocks "
85 - "(releasing %d blocks with reserved %d data blocks)",
86 - inode->i_ino, ei->i_allocated_meta_blocks,
87 - ei->i_reserved_meta_blocks, used,
88 - ei->i_reserved_data_blocks);
90 - ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
93 /* Update per-inode reservations */
94 ei->i_reserved_data_blocks -= used;
95 - ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
96 - percpu_counter_sub(&sbi->s_dirtyclusters_counter,
97 - used + ei->i_allocated_meta_blocks);
98 - ei->i_allocated_meta_blocks = 0;
99 + percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
101 - if (ei->i_reserved_data_blocks == 0) {
103 - * We can release all of the reserved metadata blocks
104 - * only when we have written all of the delayed
105 - * allocation blocks.
107 - percpu_counter_sub(&sbi->s_dirtyclusters_counter,
108 - ei->i_reserved_meta_blocks);
109 - ei->i_reserved_meta_blocks = 0;
110 - ei->i_da_metadata_calc_len = 0;
112 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
114 /* Update quota subsystem for data blocks */
115 @@ -1222,49 +1185,6 @@ static int ext4_journalled_write_end(struct file *file,
119 - * Reserve a metadata for a single block located at lblock
121 -static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
123 - struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
124 - struct ext4_inode_info *ei = EXT4_I(inode);
125 - unsigned int md_needed;
126 - ext4_lblk_t save_last_lblock;
130 - * recalculate the amount of metadata blocks to reserve
131 - * in order to allocate nrblocks
132 - * worse case is one extent per block
134 - spin_lock(&ei->i_block_reservation_lock);
136 - * ext4_calc_metadata_amount() has side effects, which we have
137 - * to be prepared undo if we fail to claim space.
139 - save_len = ei->i_da_metadata_calc_len;
140 - save_last_lblock = ei->i_da_metadata_calc_last_lblock;
141 - md_needed = EXT4_NUM_B2C(sbi,
142 - ext4_calc_metadata_amount(inode, lblock));
143 - trace_ext4_da_reserve_space(inode, md_needed);
146 - * We do still charge estimated metadata to the sb though;
147 - * we cannot afford to run out of free blocks.
149 - if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
150 - ei->i_da_metadata_calc_len = save_len;
151 - ei->i_da_metadata_calc_last_lblock = save_last_lblock;
152 - spin_unlock(&ei->i_block_reservation_lock);
155 - ei->i_reserved_meta_blocks += md_needed;
156 - spin_unlock(&ei->i_block_reservation_lock);
158 - return 0; /* success */
162 * Reserve a single cluster located at lblock
164 static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
165 @@ -1273,8 +1193,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
166 struct ext4_inode_info *ei = EXT4_I(inode);
167 unsigned int md_needed;
169 - ext4_lblk_t save_last_lblock;
173 * We will charge metadata quota at writeout time; this saves
174 @@ -1295,25 +1213,15 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
175 * ext4_calc_metadata_amount() has side effects, which we have
176 * to be prepared undo if we fail to claim space.
178 - save_len = ei->i_da_metadata_calc_len;
179 - save_last_lblock = ei->i_da_metadata_calc_last_lblock;
180 - md_needed = EXT4_NUM_B2C(sbi,
181 - ext4_calc_metadata_amount(inode, lblock));
182 - trace_ext4_da_reserve_space(inode, md_needed);
184 + trace_ext4_da_reserve_space(inode, 0);
187 - * We do still charge estimated metadata to the sb though;
188 - * we cannot afford to run out of free blocks.
190 - if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
191 - ei->i_da_metadata_calc_len = save_len;
192 - ei->i_da_metadata_calc_last_lblock = save_last_lblock;
193 + if (ext4_claim_free_clusters(sbi, 1, 0)) {
194 spin_unlock(&ei->i_block_reservation_lock);
195 dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
198 ei->i_reserved_data_blocks++;
199 - ei->i_reserved_meta_blocks += md_needed;
200 spin_unlock(&ei->i_block_reservation_lock);
202 return 0; /* success */
203 @@ -1346,20 +1254,6 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
205 ei->i_reserved_data_blocks -= to_free;
207 - if (ei->i_reserved_data_blocks == 0) {
209 - * We can release all of the reserved metadata blocks
210 - * only when we have written all of the delayed
211 - * allocation blocks.
212 - * Note that in case of bigalloc, i_reserved_meta_blocks,
213 - * i_reserved_data_blocks, etc. refer to number of clusters.
215 - percpu_counter_sub(&sbi->s_dirtyclusters_counter,
216 - ei->i_reserved_meta_blocks);
217 - ei->i_reserved_meta_blocks = 0;
218 - ei->i_da_metadata_calc_len = 0;
221 /* update fs dirty data blocks counter */
222 percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
224 @@ -1500,10 +1394,6 @@ static void ext4_print_free_blocks(struct inode *inode)
225 ext4_msg(sb, KERN_CRIT, "Block reservation details");
226 ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
227 ei->i_reserved_data_blocks);
228 - ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
229 - ei->i_reserved_meta_blocks);
230 - ext4_msg(sb, KERN_CRIT, "i_allocated_meta_blocks=%u",
231 - ei->i_allocated_meta_blocks);
235 @@ -1620,13 +1510,6 @@ add_delayed:
240 - ret = ext4_da_reserve_metadata(inode, iblock);
242 - /* not enough space to reserve */
248 ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
249 @@ -2843,8 +2726,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
251 trace_ext4_alloc_da_blocks(inode);
253 - if (!EXT4_I(inode)->i_reserved_data_blocks &&
254 - !EXT4_I(inode)->i_reserved_meta_blocks)
255 + if (!EXT4_I(inode)->i_reserved_data_blocks)
259 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
260 index 59e3162..4503b8f 100644
261 --- a/fs/ext4/mballoc.c
262 +++ b/fs/ext4/mballoc.c
263 @@ -4619,7 +4619,6 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
264 struct buffer_head *gd_bh;
265 ext4_group_t block_group;
266 struct ext4_sb_info *sbi;
267 - struct ext4_inode_info *ei = EXT4_I(inode);
268 struct ext4_buddy e4b;
269 unsigned int count_clusters;
271 @@ -4830,19 +4829,7 @@ do_more:
272 &sbi->s_flex_groups[flex_group].free_clusters);
275 - if (flags & EXT4_FREE_BLOCKS_RESERVE && ei->i_reserved_data_blocks) {
276 - percpu_counter_add(&sbi->s_dirtyclusters_counter,
278 - spin_lock(&ei->i_block_reservation_lock);
279 - if (flags & EXT4_FREE_BLOCKS_METADATA)
280 - ei->i_reserved_meta_blocks += count_clusters;
282 - ei->i_reserved_data_blocks += count_clusters;
283 - spin_unlock(&ei->i_block_reservation_lock);
284 - if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
285 - dquot_reclaim_block(inode,
286 - EXT4_C2B(sbi, count_clusters));
287 - } else if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
288 + if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
289 dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
290 percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);