ext4: convert to mbcache2

From: Jan Kara <jack@suse.cz>

The conversion is generally straightforward. The only tricky part is
that the xattr block corresponding to a found mbcache entry can get freed
before we get the buffer lock for that block. So we have to check whether
the entry is still valid after getting the buffer lock (see the
illustrative sketch after the diffstat).

Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
---
 fs/ext4/ext4.h  |   2 +-
 fs/ext4/super.c |   7 ++-
 fs/ext4/xattr.c | 136 ++++++++++++++++++++++++++++----------------------------
 fs/ext4/xattr.h |   5 +--
 4 files changed, 75 insertions(+), 75 deletions(-)
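
Not part of the patch: a minimal, illustrative sketch of the
recheck-under-buffer-lock pattern described in the commit message.
find_shared_xattr_block() is a hypothetical helper that condenses what
ext4_xattr_cache_find() and the block-reuse path in ext4_xattr_block_set()
do together; iteration over hash collisions, ext4_xattr_cmp(), refcount
updates and quota handling are all omitted.

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/mbcache2.h>

/* Illustrative only -- mirrors the check added in ext4_xattr_block_set(). */
static struct buffer_head *
find_shared_xattr_block(struct super_block *sb, struct mb2_cache *cache,
			__u32 hash)
{
	struct mb2_cache_entry *ce;
	struct buffer_head *bh;

retry:
	ce = mb2_cache_entry_find_first(cache, hash);
	if (!ce)
		return NULL;

	bh = sb_bread(sb, ce->e_block);
	if (!bh) {
		mb2_cache_entry_put(cache, ce);
		return NULL;
	}

	lock_buffer(bh);
	/*
	 * The xattr block can be freed or rehashed between the cache
	 * lookup and lock_buffer().  Both of those paths unhash the
	 * mbcache entry while holding the buffer lock, so once we hold
	 * the lock, a still-hashed entry means the block is still a
	 * valid xattr block for this hash.
	 */
	if (hlist_bl_unhashed(&ce->e_hash_list)) {
		/* Lost the race; drop everything and look up again. */
		unlock_buffer(bh);
		brelse(bh);
		mb2_cache_entry_put(cache, ce);
		goto retry;
	}

	mb2_cache_entry_touch(cache, ce);
	mb2_cache_entry_put(cache, ce);
	return bh;		/* caller unlocks and releases bh */
}

In the patch itself this check sits inline in ext4_xattr_block_set(), which
additionally bumps the block's h_refcount and undoes the dquot reservation
before retrying.
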
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0662b285dc8a..b53cbc05b172 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1468,7 +1468,7 @@ struct ext4_sb_info {
        struct list_head s_es_list;     /* List of inodes with reclaimable extents */
        long s_es_nr_inode;
        struct ext4_es_stats s_es_stats;
-       struct mb_cache *s_mb_cache;
+       struct mb2_cache *s_mb_cache;
        spinlock_t s_es_lock ____cacheline_aligned_in_smp;

        /* Ratelimit ext4 messages. */
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 3ed01ec011d7..ecc37e103435 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -844,7 +844,6 @@ static void ext4_put_super(struct super_block *sb)
        ext4_release_system_zone(sb);
        ext4_mb_release(sb);
        ext4_ext_release(sb);
-       ext4_xattr_put_super(sb);

        if (!(sb->s_flags & MS_RDONLY)) {
                ext4_clear_feature_journal_needs_recovery(sb);
@@ -3797,7 +3796,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)

 no_journal:
        if (ext4_mballoc_ready) {
-               sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
+               sbi->s_mb_cache = ext4_xattr_create_cache();
                if (!sbi->s_mb_cache) {
                        ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
                        goto failed_mount_wq;
@@ -4027,6 +4026,10 @@ failed_mount4:
        if (EXT4_SB(sb)->rsv_conversion_wq)
                destroy_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
 failed_mount_wq:
+       if (sbi->s_mb_cache) {
+               ext4_xattr_destroy_cache(sbi->s_mb_cache);
+               sbi->s_mb_cache = NULL;
+       }
        if (sbi->s_journal) {
                jbd2_journal_destroy(sbi->s_journal);
                sbi->s_journal = NULL;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index a95151e875bd..fe9f8d6ab6c9 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -53,7 +53,7 @@
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
-#include <linux/mbcache.h>
+#include <linux/mbcache2.h>
 #include <linux/quotaops.h>
 #include "ext4_jbd2.h"
 #include "ext4.h"
@@ -78,10 +78,10 @@
 # define ea_bdebug(bh, fmt, ...)       no_printk(fmt, ##__VA_ARGS__)
 #endif

-static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
+static void ext4_xattr_cache_insert(struct mb2_cache *, struct buffer_head *);
 static struct buffer_head *ext4_xattr_cache_find(struct inode *,
                                                 struct ext4_xattr_header *,
-                                                struct mb_cache_entry **);
+                                                struct mb2_cache_entry **);
 static void ext4_xattr_rehash(struct ext4_xattr_header *,
                              struct ext4_xattr_entry *);
 static int ext4_xattr_list(struct dentry *dentry, char *buffer,
@@ -276,7 +276,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
        struct ext4_xattr_entry *entry;
        size_t size;
        int error;
-       struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+       struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);

        ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
                  name_index, name, buffer, (long)buffer_size);
@@ -428,7 +428,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
        struct inode *inode = d_inode(dentry);
        struct buffer_head *bh = NULL;
        int error;
-       struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+       struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);

        ea_idebug(inode, "buffer=%p, buffer_size=%ld",
                  buffer, (long)buffer_size);
@@ -545,11 +545,8 @@ static void
 ext4_xattr_release_block(handle_t *handle, struct inode *inode,
                         struct buffer_head *bh)
 {
-       struct mb_cache_entry *ce = NULL;
        int error = 0;
-       struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);

-       ce = mb_cache_entry_get(ext4_mb_cache, bh->b_bdev, bh->b_blocknr);
        BUFFER_TRACE(bh, "get_write_access");
        error = ext4_journal_get_write_access(handle, bh);
        if (error)
@@ -557,9 +554,15 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,

        lock_buffer(bh);
        if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
+               __u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
+
                ea_bdebug(bh, "refcount now=0; freeing");
-               if (ce)
-                       mb_cache_entry_free(ce);
+               /*
+                * This must happen under buffer lock for
+                * ext4_xattr_block_set() to reliably detect freed block
+                */
+               mb2_cache_entry_delete_block(EXT4_GET_MB_CACHE(inode), hash,
+                                            bh->b_blocknr);
                get_bh(bh);
                unlock_buffer(bh);
                ext4_free_blocks(handle, inode, bh, 0, 1,
@@ -567,8 +570,6 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
                                 EXT4_FREE_BLOCKS_FORGET);
        } else {
                le32_add_cpu(&BHDR(bh)->h_refcount, -1);
-               if (ce)
-                       mb_cache_entry_release(ce);
                /*
                 * Beware of this ugliness: Releasing of xattr block references
                 * from different inodes can race and so we have to protect
@@ -781,17 +782,15 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
        struct super_block *sb = inode->i_sb;
        struct buffer_head *new_bh = NULL;
        struct ext4_xattr_search *s = &bs->s;
-       struct mb_cache_entry *ce = NULL;
+       struct mb2_cache_entry *ce = NULL;
        int error = 0;
-       struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+       struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);

 #define header(x) ((struct ext4_xattr_header *)(x))

        if (i->value && i->value_len > sb->s_blocksize)
                return -ENOSPC;
        if (s->base) {
-               ce = mb_cache_entry_get(ext4_mb_cache, bs->bh->b_bdev,
-                                       bs->bh->b_blocknr);
                BUFFER_TRACE(bs->bh, "get_write_access");
                error = ext4_journal_get_write_access(handle, bs->bh);
                if (error)
@@ -799,10 +798,15 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                lock_buffer(bs->bh);

                if (header(s->base)->h_refcount == cpu_to_le32(1)) {
-                       if (ce) {
-                               mb_cache_entry_free(ce);
-                               ce = NULL;
-                       }
+                       __u32 hash = le32_to_cpu(BHDR(bs->bh)->h_hash);
+
+                       /*
+                        * This must happen under buffer lock for
+                        * ext4_xattr_block_set() to reliably detect modified
+                        * block
+                        */
+                       mb2_cache_entry_delete_block(ext4_mb_cache, hash,
+                                                    bs->bh->b_blocknr);
                        ea_bdebug(bs->bh, "modifying in-place");
                        error = ext4_xattr_set_entry(i, s);
                        if (!error) {
@@ -826,10 +830,6 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
                        int offset = (char *)s->here - bs->bh->b_data;

                        unlock_buffer(bs->bh);
-                       if (ce) {
-                               mb_cache_entry_release(ce);
-                               ce = NULL;
-                       }
                        ea_bdebug(bs->bh, "cloning");
                        s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
                        error = -ENOMEM;
@@ -884,6 +884,31 @@ inserted:
                                if (error)
                                        goto cleanup_dquot;
                                lock_buffer(new_bh);
+                               /*
+                                * We have to be careful about races with
+                                * freeing or rehashing of xattr block. Once we
+                                * hold buffer lock xattr block's state is
+                                * stable so we can check whether the block got
+                                * freed / rehashed or not.  Since we unhash
+                                * mbcache entry under buffer lock when freeing
+                                * / rehashing xattr block, checking whether
+                                * entry is still hashed is reliable.
+                                */
+                               if (hlist_bl_unhashed(&ce->e_hash_list)) {
+                                       /*
+                                        * Undo everything and check mbcache
+                                        * again.
+                                        */
+                                       unlock_buffer(new_bh);
+                                       dquot_free_block(inode,
+                                                        EXT4_C2B(EXT4_SB(sb),
+                                                                 1));
+                                       brelse(new_bh);
+                                       mb2_cache_entry_put(ext4_mb_cache, ce);
+                                       ce = NULL;
+                                       new_bh = NULL;
+                                       goto inserted;
+                               }
                                le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
                                ea_bdebug(new_bh, "reusing; refcount now=%d",
                                        le32_to_cpu(BHDR(new_bh)->h_refcount));
@@ -894,7 +919,8 @@ inserted:
                                if (error)
                                        goto cleanup_dquot;
                        }
-                       mb_cache_entry_release(ce);
+                       mb2_cache_entry_touch(ext4_mb_cache, ce);
+                       mb2_cache_entry_put(ext4_mb_cache, ce);
                        ce = NULL;
                } else if (bs->bh && s->base == bs->bh->b_data) {
                        /* We were modifying this block in-place. */
@@ -959,7 +985,7 @@ getblk_failed:

 cleanup:
        if (ce)
-               mb_cache_entry_release(ce);
+               mb2_cache_entry_put(ext4_mb_cache, ce);
        brelse(new_bh);
        if (!(bs->bh && s->base == bs->bh->b_data))
                kfree(s->base);
@@ -1512,17 +1538,6 @@ cleanup:
 /*
- * ext4_xattr_put_super()
- *
- * This is called when a file system is unmounted.
- */
-void
-ext4_xattr_put_super(struct super_block *sb)
-{
-       mb_cache_shrink(sb->s_bdev);
-}
  * ext4_xattr_cache_insert()
  *
  * Create a new entry in the extended attribute cache, and insert
@@ -1531,28 +1546,18 @@ ext4_xattr_put_super(struct super_block *sb)
  * Returns 0, or a negative error number on failure.
  */
 static void
-ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
+ext4_xattr_cache_insert(struct mb2_cache *ext4_mb_cache, struct buffer_head *bh)
 {
        __u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
-       struct mb_cache_entry *ce;
        int error;

-       ce = mb_cache_entry_alloc(ext4_mb_cache, GFP_NOFS);
-       if (!ce) {
-               ea_bdebug(bh, "out of memory");
-               return;
-       }
-       error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
+       error = mb2_cache_entry_create(ext4_mb_cache, GFP_NOFS, hash,
+                                      bh->b_blocknr);
        if (error) {
-               mb_cache_entry_free(ce);
-               if (error == -EBUSY) {
+               if (error == -EBUSY)
                        ea_bdebug(bh, "already in cache");
-                       error = 0;
-               }
-       } else {
+       } else
                ea_bdebug(bh, "inserting [%x]", (int)hash);
-               mb_cache_entry_release(ce);
-       }
 }

 /*
@@ -1605,26 +1610,19 @@ ext4_xattr_cmp(struct ext4_xattr_header *header1,
  */
 static struct buffer_head *
 ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
-                     struct mb_cache_entry **pce)
+                     struct mb2_cache_entry **pce)
 {
        __u32 hash = le32_to_cpu(header->h_hash);
-       struct mb_cache_entry *ce;
-       struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
+       struct mb2_cache_entry *ce;
+       struct mb2_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);

        if (!header->h_hash)
                return NULL;  /* never share */
        ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
-again:
-       ce = mb_cache_entry_find_first(ext4_mb_cache, inode->i_sb->s_bdev,
-                                      hash);
+       ce = mb2_cache_entry_find_first(ext4_mb_cache, hash);
        while (ce) {
                struct buffer_head *bh;

-               if (IS_ERR(ce)) {
-                       if (PTR_ERR(ce) == -EAGAIN)
-                               goto again;
-                       break;
-               }
                bh = sb_bread(inode->i_sb, ce->e_block);
                if (!bh) {
                        EXT4_ERROR_INODE(inode, "block %lu read error",
@@ -1640,7 +1638,7 @@ again:
                        return bh;
                }
                brelse(bh);
-               ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
+               ce = mb2_cache_entry_find_next(ext4_mb_cache, ce);
        }
        return NULL;
 }
@@ -1715,15 +1713,15 @@ static void ext4_xattr_rehash(struct ext4_xattr_header *header,

 #define        HASH_BUCKET_BITS        10

-struct mb_cache *
-ext4_xattr_create_cache(char *name)
+struct mb2_cache *
+ext4_xattr_create_cache(void)
 {
-       return mb_cache_create(name, HASH_BUCKET_BITS);
+       return mb2_cache_create(HASH_BUCKET_BITS);
 }

-void ext4_xattr_destroy_cache(struct mb_cache *cache)
+void ext4_xattr_destroy_cache(struct mb2_cache *cache)
 {
        if (cache)
-               mb_cache_destroy(cache);
+               mb2_cache_destroy(cache);
 }

diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index ddc0957760ba..10b0f7323ed6 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -108,7 +108,6 @@ extern int ext4_xattr_set(struct inode *, int, const char *, const void *, size_
 extern int ext4_xattr_set_handle(handle_t *, struct inode *, int, const char *, const void *, size_t, int);

 extern void ext4_xattr_delete_inode(handle_t *, struct inode *);
-extern void ext4_xattr_put_super(struct super_block *);

 extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
                            struct ext4_inode *raw_inode, handle_t *handle);
@@ -124,8 +123,8 @@ extern int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
                                       struct ext4_xattr_info *i,
                                       struct ext4_xattr_ibody_find *is);

-extern struct mb_cache *ext4_xattr_create_cache(char *name);
-extern void ext4_xattr_destroy_cache(struct mb_cache *);
+extern struct mb2_cache *ext4_xattr_create_cache(void);
+extern void ext4_xattr_destroy_cache(struct mb2_cache *);

 #ifdef CONFIG_EXT4_FS_SECURITY
 extern int ext4_init_security(handle_t *handle, struct inode *inode,
-- 
2.6.2