Add ext4-printk-throttling patch
[ext4-patch-queue.git] / ext4-Add-percpu-dirty-block-accounting.patch
blob53081817d98428c633b9231eeb319cf92f3a5a0b
1 ext4: Add percpu dirty block accounting.
3 From: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
5 This patch adds dirty block accounting using percpu_counters. Delayed
6 allocation block reservation is now done by updating dirty block
7 counter. In a later patch, we switch to non-delalloc mode if the
8 filesystem's free blocks are greater than 150% of the total filesystem
9 dirty blocks.
11 Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
12 Signed-off-by: Mingming Cao <cmm@us.ibm.com>
13 Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
14 ---
15 fs/ext4/balloc.c | 59 +++++++++++++++++++++++++++++++++---------------------
16 fs/ext4/ext4_sb.h | 1
17 fs/ext4/inode.c | 22 ++++++++++----------
18 fs/ext4/mballoc.c | 17 ++-------------
19 fs/ext4/super.c | 8 ++++++-
20 5 files changed, 59 insertions(+), 48 deletions(-)
22 Index: linux-2.6.27-rc3/fs/ext4/balloc.c
23 ===================================================================
24 --- linux-2.6.27-rc3.orig/fs/ext4/balloc.c 2008-08-27 13:54:26.000000000 -0700
25 +++ linux-2.6.27-rc3/fs/ext4/balloc.c 2008-08-27 13:59:34.000000000 -0700
26 @@ -1603,26 +1603,38 @@
27 int ext4_claim_free_blocks(struct ext4_sb_info *sbi,
28 ext4_fsblk_t nblocks)
30 - s64 free_blocks;
31 + s64 free_blocks, dirty_blocks;
32 ext4_fsblk_t root_blocks = 0;
33 struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
34 + struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;
36 - free_blocks = percpu_counter_read(fbc);
37 + free_blocks = percpu_counter_read_positive(fbc);
38 + dirty_blocks = percpu_counter_read_positive(dbc);
40 if (!capable(CAP_SYS_RESOURCE) &&
41 sbi->s_resuid != current->fsuid &&
42 (sbi->s_resgid == 0 || !in_group_p(sbi->s_resgid)))
43 root_blocks = ext4_r_blocks_count(sbi->s_es);
45 - if (free_blocks - (nblocks + root_blocks) < EXT4_FREEBLOCKS_WATERMARK)
46 - free_blocks = percpu_counter_sum(&sbi->s_freeblocks_counter);
48 - if (free_blocks < (root_blocks + nblocks))
49 + if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
50 + EXT4_FREEBLOCKS_WATERMARK) {
51 + free_blocks = percpu_counter_sum(fbc);
52 + dirty_blocks = percpu_counter_sum(dbc);
53 + if (dirty_blocks < 0) {
54 + printk(KERN_CRIT "Dirty block accounting "
55 + "went wrong %lld\n",
56 + dirty_blocks);
57 + }
58 + }
59 + /* Check whether we have space after
60 + * accounting for current dirty blocks
61 + */
62 + if (free_blocks < ((s64)(root_blocks + nblocks) + dirty_blocks))
63 /* we don't have free space */
64 return -ENOSPC;
66 - /* reduce fs free blocks counter */
67 - percpu_counter_sub(fbc, nblocks);
68 + /* Add the blocks to nblocks */
69 + percpu_counter_add(dbc, nblocks);
70 return 0;
73 @@ -1638,23 +1650,28 @@
74 ext4_fsblk_t ext4_has_free_blocks(struct ext4_sb_info *sbi,
75 ext4_fsblk_t nblocks)
77 - ext4_fsblk_t free_blocks;
78 + ext4_fsblk_t free_blocks, dirty_blocks;
79 ext4_fsblk_t root_blocks = 0;
80 + struct percpu_counter *fbc = &sbi->s_freeblocks_counter;
81 + struct percpu_counter *dbc = &sbi->s_dirtyblocks_counter;
83 - free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
84 + free_blocks = percpu_counter_read_positive(fbc);
85 + dirty_blocks = percpu_counter_read_positive(dbc);
87 if (!capable(CAP_SYS_RESOURCE) &&
88 sbi->s_resuid != current->fsuid &&
89 (sbi->s_resgid == 0 || !in_group_p(sbi->s_resgid)))
90 root_blocks = ext4_r_blocks_count(sbi->s_es);
92 - if (free_blocks - (nblocks + root_blocks) < EXT4_FREEBLOCKS_WATERMARK)
93 - free_blocks = percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
95 - if (free_blocks <= root_blocks)
96 + if (free_blocks - (nblocks + root_blocks + dirty_blocks) <
97 + EXT4_FREEBLOCKS_WATERMARK) {
98 + free_blocks = percpu_counter_sum_positive(fbc);
99 + dirty_blocks = percpu_counter_sum_positive(dbc);
101 + if (free_blocks <= (root_blocks + dirty_blocks))
102 /* we don't have free space */
103 return 0;
104 - if (free_blocks - root_blocks < nblocks)
105 + if (free_blocks - (root_blocks + dirty_blocks) < nblocks)
106 return free_blocks - root_blocks;
107 return nblocks;
109 @@ -1941,13 +1958,11 @@
110 le16_add_cpu(&gdp->bg_free_blocks_count, -num);
111 gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
112 spin_unlock(sb_bgl_lock(sbi, group_no));
113 - if (!EXT4_I(inode)->i_delalloc_reserved_flag && (*count != num)) {
114 - /*
115 - * we allocated less blocks than we
116 - * claimed. Add the difference back.
117 - */
118 - percpu_counter_add(&sbi->s_freeblocks_counter, *count - num);
120 + percpu_counter_sub(&sbi->s_freeblocks_counter, num);
121 + /*
122 + * Now reduce the dirty block count also. Should not go negative
123 + */
124 + percpu_counter_sub(&sbi->s_dirtyblocks_counter, num);
125 if (sbi->s_log_groups_per_flex) {
126 ext4_group_t flex_group = ext4_flex_group(sbi, group_no);
127 spin_lock(sb_bgl_lock(sbi, flex_group));
128 Index: linux-2.6.27-rc3/fs/ext4/ext4_sb.h
129 ===================================================================
130 --- linux-2.6.27-rc3.orig/fs/ext4/ext4_sb.h 2008-08-12 18:55:39.000000000 -0700
131 +++ linux-2.6.27-rc3/fs/ext4/ext4_sb.h 2008-08-27 13:59:34.000000000 -0700
132 @@ -59,6 +59,7 @@
133 struct percpu_counter s_freeblocks_counter;
134 struct percpu_counter s_freeinodes_counter;
135 struct percpu_counter s_dirs_counter;
136 + struct percpu_counter s_dirtyblocks_counter;
137 struct blockgroup_lock s_blockgroup_lock;
139 /* root of the per fs reservation window tree */
140 Index: linux-2.6.27-rc3/fs/ext4/inode.c
141 ===================================================================
142 --- linux-2.6.27-rc3.orig/fs/ext4/inode.c 2008-08-27 13:54:26.000000000 -0700
143 +++ linux-2.6.27-rc3/fs/ext4/inode.c 2008-08-27 13:59:34.000000000 -0700
144 @@ -1030,19 +1030,20 @@
145 BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
146 mdb_free = EXT4_I(inode)->i_reserved_meta_blocks - mdb;
148 - /* Account for allocated meta_blocks */
149 - mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
151 - /* update fs free blocks counter for truncate case */
152 - percpu_counter_add(&sbi->s_freeblocks_counter, mdb_free);
153 + if (mdb_free) {
154 + /* Account for allocated meta_blocks */
155 + mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
157 + /* update fs dirty blocks counter */
158 + percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
159 + EXT4_I(inode)->i_allocated_meta_blocks = 0;
160 + EXT4_I(inode)->i_reserved_meta_blocks = mdb;
163 /* update per-inode reservations */
164 BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
165 EXT4_I(inode)->i_reserved_data_blocks -= used;
167 - BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
168 - EXT4_I(inode)->i_reserved_meta_blocks = mdb;
169 - EXT4_I(inode)->i_allocated_meta_blocks = 0;
170 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
173 @@ -1588,8 +1589,8 @@
175 release = to_free + mdb_free;
177 - /* update fs free blocks counter for truncate case */
178 - percpu_counter_add(&sbi->s_freeblocks_counter, release);
179 + /* update fs dirty blocks counter for truncate case */
180 + percpu_counter_sub(&sbi->s_dirtyblocks_counter, release);
182 /* update per-inode reservations */
183 BUG_ON(to_free > EXT4_I(inode)->i_reserved_data_blocks);
184 @@ -2471,7 +2472,6 @@
185 index = pos >> PAGE_CACHE_SHIFT;
186 from = pos & (PAGE_CACHE_SIZE - 1);
187 to = from + len;
189 retry:
191 * With delayed allocation, we don't log the i_disksize update
192 Index: linux-2.6.27-rc3/fs/ext4/mballoc.c
193 ===================================================================
194 --- linux-2.6.27-rc3.orig/fs/ext4/mballoc.c 2008-08-27 13:54:26.000000000 -0700
195 +++ linux-2.6.27-rc3/fs/ext4/mballoc.c 2008-08-27 13:59:34.000000000 -0700
196 @@ -2968,22 +2968,11 @@
197 le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len);
198 gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
199 spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
201 + percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
203 - * free blocks account has already be reduced/reserved
204 - * at write_begin() time for delayed allocation
205 - * do not double accounting
206 + * Now reduce the dirty block count also. Should not go negative
208 - if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED) &&
209 - ac->ac_o_ex.fe_len != ac->ac_b_ex.fe_len) {
210 - /*
211 - * we allocated less blocks than we calimed
212 - * Add the difference back
213 - */
214 - percpu_counter_add(&sbi->s_freeblocks_counter,
215 - ac->ac_o_ex.fe_len - ac->ac_b_ex.fe_len);
218 + percpu_counter_sub(&sbi->s_dirtyblocks_counter, ac->ac_b_ex.fe_len);
219 if (sbi->s_log_groups_per_flex) {
220 ext4_group_t flex_group = ext4_flex_group(sbi,
221 ac->ac_b_ex.fe_group);
222 Index: linux-2.6.27-rc3/fs/ext4/super.c
223 ===================================================================
224 --- linux-2.6.27-rc3.orig/fs/ext4/super.c 2008-08-27 09:31:21.000000000 -0700
225 +++ linux-2.6.27-rc3/fs/ext4/super.c 2008-08-27 13:59:34.000000000 -0700
226 @@ -520,6 +520,7 @@
227 percpu_counter_destroy(&sbi->s_freeblocks_counter);
228 percpu_counter_destroy(&sbi->s_freeinodes_counter);
229 percpu_counter_destroy(&sbi->s_dirs_counter);
230 + percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
231 brelse(sbi->s_sbh);
232 #ifdef CONFIG_QUOTA
233 for (i = 0; i < MAXQUOTAS; i++)
234 @@ -2257,6 +2258,9 @@
235 err = percpu_counter_init(&sbi->s_dirs_counter,
236 ext4_count_dirs(sb));
238 + if (!err) {
239 + err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
241 if (err) {
242 printk(KERN_ERR "EXT4-fs: insufficient memory\n");
243 goto failed_mount3;
244 @@ -2489,6 +2493,7 @@
245 percpu_counter_destroy(&sbi->s_freeblocks_counter);
246 percpu_counter_destroy(&sbi->s_freeinodes_counter);
247 percpu_counter_destroy(&sbi->s_dirs_counter);
248 + percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
249 failed_mount2:
250 for (i = 0; i < db_count; i++)
251 brelse(sbi->s_group_desc[i]);
252 @@ -3162,7 +3167,8 @@
253 buf->f_type = EXT4_SUPER_MAGIC;
254 buf->f_bsize = sb->s_blocksize;
255 buf->f_blocks = ext4_blocks_count(es) - sbi->s_overhead_last;
256 - buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
257 + buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter) -
258 + percpu_counter_sum_positive(&sbi->s_dirtyblocks_counter);
259 ext4_free_blocks_count_set(es, buf->f_bfree);
260 buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es);
261 if (buf->f_bfree < ext4_r_blocks_count(es))