fs/mbcache.c: change block and index hash chain to hlist_bl_node

From: T Makphaibulchoke <tmac@hp.com>
This patch changes both the block and index hash chains of each
mb_cache to use hlist_bl_node, which carries a built-in lock.  This is
the first step in decoupling the locks that serialize access to the
mb_cache global data from those protecting each mb_cache_entry's local
data.
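
As background, the sketch below is an illustration only and not part of
this patch; the demo_* names are invented for the example.  It shows how
a hash bucket chained with hlist_bl_node can be serialized by the bit
lock embedded in its hlist_bl_head via hlist_bl_lock()/hlist_bl_unlock().
In this patch the hash chains are still serialized by the global
mb_cache_spinlock; the per-chain lock is only taken advantage of by a
follow-up change.

  /*
   * Illustration only: a single hash bucket whose entries hang off an
   * hlist_bl_head.  The lock lives in bit 0 of the head pointer, so
   * each bucket can be locked independently of every other bucket.
   */
  #include <linux/list_bl.h>

  struct demo_entry {				/* invented for this example */
  	struct hlist_bl_node	d_node;		/* chained on one bucket */
  	unsigned int		d_key;
  };

  static struct hlist_bl_head demo_bucket;	/* one hash bucket */

  static void demo_init(void)
  {
  	INIT_HLIST_BL_HEAD(&demo_bucket);
  }

  static void demo_insert(struct demo_entry *e)
  {
  	hlist_bl_lock(&demo_bucket);		/* bit lock on this bucket only */
  	hlist_bl_add_head(&e->d_node, &demo_bucket);
  	hlist_bl_unlock(&demo_bucket);
  }

  static struct demo_entry *demo_lookup(unsigned int key)
  {
  	struct demo_entry *e;
  	struct hlist_bl_node *p;

  	hlist_bl_lock(&demo_bucket);
  	hlist_bl_for_each_entry(e, p, &demo_bucket, d_node) {
  		if (e->d_key == key) {
  			hlist_bl_unlock(&demo_bucket);
  			return e;
  		}
  	}
  	hlist_bl_unlock(&demo_bucket);
  	return NULL;
  }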
Signed-off-by: T. Makphaibulchoke <tmac@hp.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>

 fs/mbcache.c            | 117 ++++++++++++++++++++++++++++++++----------------
 include/linux/mbcache.h |  12 ++---
 2 files changed, 85 insertions(+), 44 deletions(-)
diff --git a/fs/mbcache.c b/fs/mbcache.c
index e519e45..55db0da 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
 #include <linux/slab.h>
 #include <linux/sched.h>
-#include <linux/init.h>
+#include <linux/list_bl.h>
 #include <linux/mbcache.h>
+#include <linux/init.h>
 # define mb_debug(f...) do { \
@@ -87,21 +87,38 @@ static LIST_HEAD(mb_cache_lru_list);
 static DEFINE_SPINLOCK(mb_cache_spinlock);
-__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
+__mb_cache_entry_is_block_hashed(struct mb_cache_entry *ce)
-	return !list_empty(&ce->e_block_list);
+	return !hlist_bl_unhashed(&ce->e_block_list);
-__mb_cache_entry_unhash(struct mb_cache_entry *ce)
+__mb_cache_entry_unhash_block(struct mb_cache_entry *ce)
-	if (__mb_cache_entry_is_hashed(ce)) {
-		list_del_init(&ce->e_block_list);
-		list_del(&ce->e_index.o_list);
+	if (__mb_cache_entry_is_block_hashed(ce))
+		hlist_bl_del_init(&ce->e_block_list);
+__mb_cache_entry_is_index_hashed(struct mb_cache_entry *ce)
+	return !hlist_bl_unhashed(&ce->e_index.o_list);
+__mb_cache_entry_unhash_index(struct mb_cache_entry *ce)
+	if (__mb_cache_entry_is_index_hashed(ce))
+		hlist_bl_del_init(&ce->e_index.o_list);
+__mb_cache_entry_unhash(struct mb_cache_entry *ce)
+	__mb_cache_entry_unhash_index(ce);
+	__mb_cache_entry_unhash_block(ce);
 __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
@@ -125,7 +142,7 @@ __mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
 		ce->e_used -= MB_CACHE_WRITER;
 	if (!(ce->e_used || ce->e_queued)) {
-		if (!__mb_cache_entry_is_hashed(ce))
+		if (!__mb_cache_entry_is_block_hashed(ce))
 		mb_assert(list_empty(&ce->e_lru_list));
 		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
@@ -221,18 +238,18 @@ mb_cache_create(const char *name, int bucket_bits)
 	atomic_set(&cache->c_entry_count, 0);
 	cache->c_bucket_bits = bucket_bits;
-	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
+	cache->c_block_hash = kmalloc(bucket_count *
+				sizeof(struct hlist_bl_head), GFP_KERNEL);
 	if (!cache->c_block_hash)
 	for (n=0; n<bucket_count; n++)
-		INIT_LIST_HEAD(&cache->c_block_hash[n]);
-	cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
+		INIT_HLIST_BL_HEAD(&cache->c_block_hash[n]);
+	cache->c_index_hash = kmalloc(bucket_count *
+				sizeof(struct hlist_bl_head), GFP_KERNEL);
 	if (!cache->c_index_hash)
 	for (n=0; n<bucket_count; n++)
-		INIT_LIST_HEAD(&cache->c_index_hash[n]);
+		INIT_HLIST_BL_HEAD(&cache->c_index_hash[n]);
 	cache->c_entry_cache = kmem_cache_create(name,
 				sizeof(struct mb_cache_entry), 0,
 				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
@@ -364,10 +381,13 @@ mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
 		atomic_inc(&cache->c_entry_count);
 		INIT_LIST_HEAD(&ce->e_lru_list);
-		INIT_LIST_HEAD(&ce->e_block_list);
+		INIT_HLIST_BL_NODE(&ce->e_block_list);
+		INIT_HLIST_BL_NODE(&ce->e_index.o_list);
+	ce->e_block_hash_p = &cache->c_block_hash[0];
+	ce->e_index_hash_p = &cache->c_index_hash[0];
 	ce->e_used = 1 + MB_CACHE_WRITER;
@@ -393,25 +413,32 @@ mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
 	struct mb_cache *cache = ce->e_cache;
-	struct list_head *l;
+	struct hlist_bl_node *l;
+	struct hlist_bl_head *block_hash_p;
+	struct hlist_bl_head *index_hash_p;
+	struct mb_cache_entry *lce;
 	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
 			   cache->c_bucket_bits);
+	block_hash_p = &cache->c_block_hash[bucket];
 	spin_lock(&mb_cache_spinlock);
-	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
-		struct mb_cache_entry *ce =
-			list_entry(l, struct mb_cache_entry, e_block_list);
-		if (ce->e_bdev == bdev && ce->e_block == block)
+	hlist_bl_for_each_entry(lce, l, block_hash_p, e_block_list) {
+		if (lce->e_bdev == bdev && lce->e_block == block)
+	mb_assert(!__mb_cache_entry_is_block_hashed(ce));
 	__mb_cache_entry_unhash(ce);
-	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
+	ce->e_block_hash_p = block_hash_p;
 	ce->e_index.o_key = key;
 	bucket = hash_long(key, cache->c_bucket_bits);
-	list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
+	index_hash_p = &cache->c_index_hash[bucket];
+	ce->e_index_hash_p = index_hash_p;
+	hlist_bl_add_head(&ce->e_index.o_list, index_hash_p);
+	hlist_bl_add_head(&ce->e_block_list, block_hash_p);
 	spin_unlock(&mb_cache_spinlock);
@@ -463,14 +490,16 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
-	struct list_head *l;
+	struct hlist_bl_node *l;
 	struct mb_cache_entry *ce;
+	struct hlist_bl_head *block_hash_p;
 	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
 			   cache->c_bucket_bits);
+	block_hash_p = &cache->c_block_hash[bucket];
 	spin_lock(&mb_cache_spinlock);
-	list_for_each(l, &cache->c_block_hash[bucket]) {
-		ce = list_entry(l, struct mb_cache_entry, e_block_list);
+	hlist_bl_for_each_entry(ce, l, block_hash_p, e_block_list) {
+		mb_assert(ce->e_block_hash_p == block_hash_p);
 		if (ce->e_bdev == bdev && ce->e_block == block) {
@@ -489,7 +518,7 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
 		finish_wait(&mb_cache_queue, &wait);
 		ce->e_used += 1 + MB_CACHE_WRITER;
-		if (!__mb_cache_entry_is_hashed(ce)) {
+		if (!__mb_cache_entry_is_block_hashed(ce)) {
 			__mb_cache_entry_release_unlock(ce);
@@ -506,12 +535,14 @@ cleanup:
 #if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
 static struct mb_cache_entry *
-__mb_cache_entry_find(struct list_head *l, struct list_head *head,
+__mb_cache_entry_find(struct hlist_bl_node *l, struct hlist_bl_head *head,
 		      struct block_device *bdev, unsigned int key)
-	while (l != head) {
+	while (l != NULL) {
 		struct mb_cache_entry *ce =
-			list_entry(l, struct mb_cache_entry, e_index.o_list);
+			hlist_bl_entry(l, struct mb_cache_entry,
+		mb_assert(ce->e_index_hash_p == head);
 		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
@@ -532,7 +563,7 @@ __mb_cache_entry_find(struct list_head *l, struct list_head *head,
 			finish_wait(&mb_cache_queue, &wait);
-			if (!__mb_cache_entry_is_hashed(ce)) {
+			if (!__mb_cache_entry_is_block_hashed(ce)) {
 				__mb_cache_entry_release_unlock(ce);
 				spin_lock(&mb_cache_spinlock);
 				return ERR_PTR(-EAGAIN);
@@ -562,12 +593,16 @@ mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
 	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
-	struct list_head *l;
-	struct mb_cache_entry *ce;
+	struct hlist_bl_node *l;
+	struct mb_cache_entry *ce = NULL;
+	struct hlist_bl_head *index_hash_p;
+	index_hash_p = &cache->c_index_hash[bucket];
 	spin_lock(&mb_cache_spinlock);
-	l = cache->c_index_hash[bucket].next;
-	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
+	if (!hlist_bl_empty(index_hash_p)) {
+		l = hlist_bl_first(index_hash_p);
+		ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
 	spin_unlock(&mb_cache_spinlock);
@@ -597,12 +632,16 @@ mb_cache_entry_find_next(struct mb_cache_entry *prev,
 	struct mb_cache *cache = prev->e_cache;
 	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
-	struct list_head *l;
+	struct hlist_bl_node *l;
 	struct mb_cache_entry *ce;
+	struct hlist_bl_head *index_hash_p;
+	index_hash_p = &cache->c_index_hash[bucket];
+	mb_assert(prev->e_index_hash_p == index_hash_p);
 	spin_lock(&mb_cache_spinlock);
+	mb_assert(!hlist_bl_empty(index_hash_p));
 	l = prev->e_index.o_list.next;
-	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
+	ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
 	__mb_cache_entry_release_unlock(prev);
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 5525d37..6a392e7 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
   (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
 struct mb_cache_entry {
 	struct list_head		e_lru_list;
 	struct mb_cache			*e_cache;
 	unsigned short			e_used;
 	unsigned short			e_queued;
 	struct block_device		*e_bdev;
-	struct list_head		e_block_list;
+	struct hlist_bl_node		e_block_list;
-		struct list_head	o_list;
+		struct hlist_bl_node	o_list;
+	struct hlist_bl_head		*e_block_hash_p;
+	struct hlist_bl_head		*e_index_hash_p;
@@ -25,8 +27,8 @@ struct mb_cache {
 	struct kmem_cache		*c_entry_cache;
-	struct list_head		*c_block_hash;
-	struct list_head		*c_index_hash;
+	struct hlist_bl_head		*c_block_hash;
+	struct hlist_bl_head		*c_index_hash;
 /* Functions on caches */