1 ext4: each filesystem creates and uses its own mb_cache
3 From: T Makphaibulchoke <tmac@hp.com>
5 This patch adds new interfaces to create and destroy a cache,
6 ext4_xattr_create_cache() and ext4_xattr_destroy_cache(), and removes
7 the cache creation and destroy calls from ext4_init_xattr() and
8 ext4_exit_xattr() in fs/ext4/xattr.c.
10 fs/ext4/super.c has been changed so that when a filesystem is mounted
11 a cache is allocated and attached to its ext4_sb_info structure.
13 fs/mbcache.c has been changed so that only one slab allocator is
14 allocated and used by all mbcache structures.
16 Signed-off-by: T. Makphaibulchoke <tmac@hp.com>
19 fs/ext4/super.c | 24 ++++++++++++++++--------
20 fs/ext4/xattr.c | 51 ++++++++++++++++++++++++++++-----------------------
21 fs/ext4/xattr.h | 6 +++---
22 fs/mbcache.c | 18 +++++++++++++-----
23 5 files changed, 61 insertions(+), 39 deletions(-)
25 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
26 index e618503..fa20aab 100644
29 @@ -1314,6 +1314,7 @@ struct ext4_sb_info {
30 struct list_head s_es_lru;
31 unsigned long s_es_last_sorted;
32 struct percpu_counter s_extent_cache_cnt;
33 + struct mb_cache *s_mb_cache;
34 spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
36 /* Ratelimit ext4 messages. */
37 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
38 index c977f4e..176048e 100644
41 @@ -59,6 +59,7 @@ static struct kset *ext4_kset;
42 static struct ext4_lazy_init *ext4_li_info;
43 static struct mutex ext4_li_mtx;
44 static struct ext4_features *ext4_feat;
45 +static int ext4_mballoc_ready;
47 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
48 unsigned long journal_devnum);
49 @@ -845,6 +846,10 @@ static void ext4_put_super(struct super_block *sb)
50 invalidate_bdev(sbi->journal_bdev);
51 ext4_blkdev_remove(sbi);
53 + if (sbi->s_mb_cache) {
54 + ext4_xattr_destroy_cache(sbi->s_mb_cache);
55 + sbi->s_mb_cache = NULL;
58 kthread_stop(sbi->s_mmp_tsk);
60 @@ -3981,6 +3986,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
61 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
63 sbi->s_journal->j_commit_callback = ext4_journal_commit_callback;
64 + if (ext4_mballoc_ready) {
65 + sbi->s_mb_cache = ext4_xattr_create_cache(sb->s_id);
66 + if (!sbi->s_mb_cache) {
67 + ext4_msg(sb, KERN_ERR, "Failed to create an mb_cache");
68 + goto failed_mount_wq;
73 * The journal may have updated the bg summary counts, so we
74 @@ -5501,11 +5513,9 @@ static int __init ext4_init_fs(void)
76 err = ext4_init_mballoc();
80 - err = ext4_init_xattr();
84 + ext4_mballoc_ready = 1;
85 err = init_inodecache();
88 @@ -5521,10 +5531,9 @@ out:
94 + ext4_mballoc_ready = 0;
98 ext4_exit_feat_adverts();
101 @@ -5547,7 +5556,6 @@ static void __exit ext4_exit_fs(void)
102 unregister_as_ext3();
103 unregister_filesystem(&ext4_fs_type);
104 destroy_inodecache();
107 ext4_exit_feat_adverts();
108 remove_proc_entry("fs/ext4", NULL);
109 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
110 index 1423c48..6e2788f 100644
111 --- a/fs/ext4/xattr.c
112 +++ b/fs/ext4/xattr.c
114 # define ea_bdebug(bh, fmt, ...) no_printk(fmt, ##__VA_ARGS__)
117 -static void ext4_xattr_cache_insert(struct buffer_head *);
118 +static void ext4_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
119 static struct buffer_head *ext4_xattr_cache_find(struct inode *,
120 struct ext4_xattr_header *,
121 struct mb_cache_entry **);
122 @@ -90,8 +90,6 @@ static void ext4_xattr_rehash(struct ext4_xattr_header *,
123 static int ext4_xattr_list(struct dentry *dentry, char *buffer,
126 -static struct mb_cache *ext4_xattr_cache;
128 static const struct xattr_handler *ext4_xattr_handler_map[] = {
129 [EXT4_XATTR_INDEX_USER] = &ext4_xattr_user_handler,
130 #ifdef CONFIG_EXT4_FS_POSIX_ACL
131 @@ -117,6 +115,9 @@ const struct xattr_handler *ext4_xattr_handlers[] = {
135 +#define EXT4_GET_MB_CACHE(inode) (((struct ext4_sb_info *) \
136 + inode->i_sb->s_fs_info)->s_mb_cache)
138 static __le32 ext4_xattr_block_csum(struct inode *inode,
140 struct ext4_xattr_header *hdr)
141 @@ -265,6 +266,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
142 struct ext4_xattr_entry *entry;
145 + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
147 ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
148 name_index, name, buffer, (long)buffer_size);
149 @@ -286,7 +288,7 @@ bad_block:
153 - ext4_xattr_cache_insert(bh);
154 + ext4_xattr_cache_insert(ext4_mb_cache, bh);
156 error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
158 @@ -409,6 +411,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
159 struct inode *inode = dentry->d_inode;
160 struct buffer_head *bh = NULL;
162 + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
164 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
165 buffer, (long)buffer_size);
166 @@ -430,7 +433,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
170 - ext4_xattr_cache_insert(bh);
171 + ext4_xattr_cache_insert(ext4_mb_cache, bh);
172 error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
175 @@ -526,8 +529,9 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
177 struct mb_cache_entry *ce = NULL;
179 + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
181 - ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
182 + ce = mb_cache_entry_get(ext4_mb_cache, bh->b_bdev, bh->b_blocknr);
183 error = ext4_journal_get_write_access(handle, bh);
186 @@ -745,13 +749,14 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
187 struct ext4_xattr_search *s = &bs->s;
188 struct mb_cache_entry *ce = NULL;
190 + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
192 #define header(x) ((struct ext4_xattr_header *)(x))
194 if (i->value && i->value_len > sb->s_blocksize)
197 - ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
198 + ce = mb_cache_entry_get(ext4_mb_cache, bs->bh->b_bdev,
200 error = ext4_journal_get_write_access(handle, bs->bh);
202 @@ -769,7 +774,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
203 if (!IS_LAST_ENTRY(s->first))
204 ext4_xattr_rehash(header(s->base),
206 - ext4_xattr_cache_insert(bs->bh);
207 + ext4_xattr_cache_insert(ext4_mb_cache,
210 unlock_buffer(bs->bh);
212 @@ -905,7 +911,7 @@ getblk_failed:
213 memcpy(new_bh->b_data, s->base, new_bh->b_size);
214 set_buffer_uptodate(new_bh);
215 unlock_buffer(new_bh);
216 - ext4_xattr_cache_insert(new_bh);
217 + ext4_xattr_cache_insert(ext4_mb_cache, new_bh);
218 error = ext4_handle_dirty_xattr_block(handle,
221 @@ -1495,13 +1501,13 @@ ext4_xattr_put_super(struct super_block *sb)
222 * Returns 0, or a negative error number on failure.
225 -ext4_xattr_cache_insert(struct buffer_head *bh)
226 +ext4_xattr_cache_insert(struct mb_cache *ext4_mb_cache, struct buffer_head *bh)
228 __u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
229 struct mb_cache_entry *ce;
232 - ce = mb_cache_entry_alloc(ext4_xattr_cache, GFP_NOFS);
233 + ce = mb_cache_entry_alloc(ext4_mb_cache, GFP_NOFS);
235 ea_bdebug(bh, "out of memory");
237 @@ -1573,12 +1579,13 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
239 __u32 hash = le32_to_cpu(header->h_hash);
240 struct mb_cache_entry *ce;
241 + struct mb_cache *ext4_mb_cache = EXT4_GET_MB_CACHE(inode);
244 return NULL; /* never share */
245 ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
247 - ce = mb_cache_entry_find_first(ext4_xattr_cache, inode->i_sb->s_bdev,
248 + ce = mb_cache_entry_find_first(ext4_mb_cache, inode->i_sb->s_bdev,
251 struct buffer_head *bh;
252 @@ -1676,19 +1683,17 @@ static void ext4_xattr_rehash(struct ext4_xattr_header *header,
254 #undef BLOCK_HASH_SHIFT
257 -ext4_init_xattr(void)
258 +#define HASH_BUCKET_BITS 10
261 +ext4_xattr_create_cache(char *name)
263 - ext4_xattr_cache = mb_cache_create("ext4_xattr", 6);
264 - if (!ext4_xattr_cache)
267 + return mb_cache_create(name, HASH_BUCKET_BITS);
271 -ext4_exit_xattr(void)
272 +void ext4_xattr_destroy_cache(struct mb_cache *cache)
274 - if (ext4_xattr_cache)
275 - mb_cache_destroy(ext4_xattr_cache);
276 - ext4_xattr_cache = NULL;
278 + mb_cache_destroy(cache);
281 diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
282 index c767dbd..b930320 100644
283 --- a/fs/ext4/xattr.h
284 +++ b/fs/ext4/xattr.h
285 @@ -112,9 +112,6 @@ extern void ext4_xattr_put_super(struct super_block *);
286 extern int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
287 struct ext4_inode *raw_inode, handle_t *handle);
289 -extern int __init ext4_init_xattr(void);
290 -extern void ext4_exit_xattr(void);
292 extern const struct xattr_handler *ext4_xattr_handlers[];
294 extern int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
295 @@ -126,6 +123,9 @@ extern int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
296 struct ext4_xattr_info *i,
297 struct ext4_xattr_ibody_find *is);
299 +extern struct mb_cache *ext4_xattr_create_cache(char *name);
300 +extern void ext4_xattr_destroy_cache(struct mb_cache *);
302 #ifdef CONFIG_EXT4_FS_SECURITY
303 extern int ext4_init_security(handle_t *handle, struct inode *inode,
304 struct inode *dir, const struct qstr *qstr);
305 diff --git a/fs/mbcache.c b/fs/mbcache.c
306 index 786ecab..bf166e3 100644
311 static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
312 static struct blockgroup_lock *mb_cache_bg_lock;
313 +static struct kmem_cache *mb_cache_kmem_cache;
315 MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
316 MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
317 @@ -351,11 +352,14 @@ mb_cache_create(const char *name, int bucket_bits)
319 for (n=0; n<bucket_count; n++)
320 INIT_HLIST_BL_HEAD(&cache->c_index_hash[n]);
321 - cache->c_entry_cache = kmem_cache_create(name,
322 - sizeof(struct mb_cache_entry), 0,
323 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
324 - if (!cache->c_entry_cache)
326 + if (!mb_cache_kmem_cache) {
327 + mb_cache_kmem_cache = kmem_cache_create(name,
328 + sizeof(struct mb_cache_entry), 0,
329 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
330 + if (!mb_cache_kmem_cache)
333 + cache->c_entry_cache = mb_cache_kmem_cache;
336 * Set an upper limit on the number of cache entries so that the hash
337 @@ -476,6 +480,10 @@ mb_cache_destroy(struct mb_cache *cache)
338 atomic_read(&cache->c_entry_count));
341 + if (list_empty(&mb_cache_list)) {
342 + kmem_cache_destroy(mb_cache_kmem_cache);
343 + mb_cache_kmem_cache = NULL;
345 kfree(cache->c_index_hash);
346 kfree(cache->c_block_hash);