fs/mbcache.c: change block and index hash chain to hlist_bl_node

From: T Makphaibulchoke <tmac@hp.com>

This patch changes both the block and index hash chains of each mb_cache
to use hlist_bl_node, which provides a built-in lock.  This is the first
step in decoupling the lock serializing accesses to mb_cache global data
from the locks protecting each mb_cache_entry's local data.
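
For reference, a minimal usage sketch (illustrative only, not part of this
patch): an hlist_bl_head keeps a bit spinlock in bit 0 of its ->first
pointer, so each hash chain can later be locked on its own through the
standard <linux/list_bl.h> helpers instead of a cache-wide lock.  The
example_* names below are made up for illustration:

#include <linux/list_bl.h>

struct example_entry {
	struct hlist_bl_node	list;
	unsigned int		key;
};

static struct hlist_bl_head example_hash[16];

static void example_insert(struct example_entry *e, unsigned int bucket)
{
	struct hlist_bl_head *head = &example_hash[bucket];

	hlist_bl_lock(head);		/* bit_spin_lock on bit 0 of head->first */
	hlist_bl_add_head(&e->list, head);
	hlist_bl_unlock(head);
}

static struct example_entry *example_find(unsigned int key, unsigned int bucket)
{
	struct hlist_bl_head *head = &example_hash[bucket];
	struct hlist_bl_node *p;
	struct example_entry *e;

	hlist_bl_lock(head);
	hlist_bl_for_each_entry(e, p, head, list) {
		if (e->key == key) {
			hlist_bl_unlock(head);
			return e;
		}
	}
	hlist_bl_unlock(head);
	return NULL;
}

This patch itself still serializes everything under mb_cache_spinlock; the
per-chain bit locks are what the follow-up decoupling work builds on.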

Signed-off-by: T. Makphaibulchoke <tmac@hp.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
---
 fs/mbcache.c            | 117 ++++++++++++++++++++++++++++++++----------------
 include/linux/mbcache.h |  12 ++---
 2 files changed, 85 insertions(+), 44 deletions(-)

diff --git a/fs/mbcache.c b/fs/mbcache.c
index e519e45..55db0da 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -34,9 +34,9 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
-#include <linux/init.h>
+#include <linux/list_bl.h>
 #include <linux/mbcache.h>
-
+#include <linux/init.h>
 
 #ifdef MB_CACHE_DEBUG
 # define mb_debug(f...) do { \
@@ -87,21 +87,38 @@ static LIST_HEAD(mb_cache_lru_list);
 static DEFINE_SPINLOCK(mb_cache_spinlock);
 
 static inline int
-__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
+__mb_cache_entry_is_block_hashed(struct mb_cache_entry *ce)
 {
-       return !list_empty(&ce->e_block_list);
+       return !hlist_bl_unhashed(&ce->e_block_list);
 }
 
 
-static void
-__mb_cache_entry_unhash(struct mb_cache_entry *ce)
+static inline void
+__mb_cache_entry_unhash_block(struct mb_cache_entry *ce)
 {
-       if (__mb_cache_entry_is_hashed(ce)) {
-               list_del_init(&ce->e_block_list);
-               list_del(&ce->e_index.o_list);
-       }
+       if (__mb_cache_entry_is_block_hashed(ce))
+               hlist_bl_del_init(&ce->e_block_list);
+}
+
+static inline int
+__mb_cache_entry_is_index_hashed(struct mb_cache_entry *ce)
+{
+       return !hlist_bl_unhashed(&ce->e_index.o_list);
 }
 
+static inline void
+__mb_cache_entry_unhash_index(struct mb_cache_entry *ce)
+{
+       if (__mb_cache_entry_is_index_hashed(ce))
+               hlist_bl_del_init(&ce->e_index.o_list);
+}
+
+static inline void
+__mb_cache_entry_unhash(struct mb_cache_entry *ce)
+{
+       __mb_cache_entry_unhash_index(ce);
+       __mb_cache_entry_unhash_block(ce);
+}
 
 static void
 __mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
@@ -125,7 +142,7 @@ __mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
                ce->e_used -= MB_CACHE_WRITER;
        ce->e_used--;
        if (!(ce->e_used || ce->e_queued)) {
-               if (!__mb_cache_entry_is_hashed(ce))
+               if (!__mb_cache_entry_is_block_hashed(ce))
                        goto forget;
                mb_assert(list_empty(&ce->e_lru_list));
                list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
@@ -221,18 +238,18 @@ mb_cache_create(const char *name, int bucket_bits)
        cache->c_name = name;
        atomic_set(&cache->c_entry_count, 0);
        cache->c_bucket_bits = bucket_bits;
-       cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
-                                     GFP_KERNEL);
+       cache->c_block_hash = kmalloc(bucket_count *
+               sizeof(struct hlist_bl_head), GFP_KERNEL);
        if (!cache->c_block_hash)
                goto fail;
        for (n=0; n<bucket_count; n++)
-               INIT_LIST_HEAD(&cache->c_block_hash[n]);
-       cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
-                                     GFP_KERNEL);
+               INIT_HLIST_BL_HEAD(&cache->c_block_hash[n]);
+       cache->c_index_hash = kmalloc(bucket_count *
+               sizeof(struct hlist_bl_head), GFP_KERNEL);
        if (!cache->c_index_hash)
                goto fail;
        for (n=0; n<bucket_count; n++)
-               INIT_LIST_HEAD(&cache->c_index_hash[n]);
+               INIT_HLIST_BL_HEAD(&cache->c_index_hash[n]);
        cache->c_entry_cache = kmem_cache_create(name,
                sizeof(struct mb_cache_entry), 0,
                SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
@@ -364,10 +381,13 @@ mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
                        return NULL;
                atomic_inc(&cache->c_entry_count);
                INIT_LIST_HEAD(&ce->e_lru_list);
-               INIT_LIST_HEAD(&ce->e_block_list);
+               INIT_HLIST_BL_NODE(&ce->e_block_list);
+               INIT_HLIST_BL_NODE(&ce->e_index.o_list);
                ce->e_cache = cache;
                ce->e_queued = 0;
        }
+       ce->e_block_hash_p = &cache->c_block_hash[0];
+       ce->e_index_hash_p = &cache->c_index_hash[0];
        ce->e_used = 1 + MB_CACHE_WRITER;
        return ce;
 }
@@ -393,25 +413,32 @@ mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
 {
        struct mb_cache *cache = ce->e_cache;
        unsigned int bucket;
-       struct list_head *l;
+       struct hlist_bl_node *l;
        int error = -EBUSY;
+       struct hlist_bl_head *block_hash_p;
+       struct hlist_bl_head *index_hash_p;
+       struct mb_cache_entry *lce;
 
+       mb_assert(ce);
        bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
                           cache->c_bucket_bits);
+       block_hash_p = &cache->c_block_hash[bucket];
        spin_lock(&mb_cache_spinlock);
-       list_for_each_prev(l, &cache->c_block_hash[bucket]) {
-               struct mb_cache_entry *ce =
-                       list_entry(l, struct mb_cache_entry, e_block_list);
-               if (ce->e_bdev == bdev && ce->e_block == block)
+       hlist_bl_for_each_entry(lce, l, block_hash_p, e_block_list) {
+               if (lce->e_bdev == bdev && lce->e_block == block)
                        goto out;
        }
+       mb_assert(!__mb_cache_entry_is_block_hashed(ce));
        __mb_cache_entry_unhash(ce);
        ce->e_bdev = bdev;
        ce->e_block = block;
-       list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
+       ce->e_block_hash_p = block_hash_p;
        ce->e_index.o_key = key;
        bucket = hash_long(key, cache->c_bucket_bits);
-       list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
+       index_hash_p = &cache->c_index_hash[bucket];
+       ce->e_index_hash_p = index_hash_p;
+       hlist_bl_add_head(&ce->e_index.o_list, index_hash_p);
+       hlist_bl_add_head(&ce->e_block_list, block_hash_p);
        error = 0;
 out:
        spin_unlock(&mb_cache_spinlock);
@@ -463,14 +490,16 @@ mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
                   sector_t block)
 {
        unsigned int bucket;
-       struct list_head *l;
+       struct hlist_bl_node *l;
        struct mb_cache_entry *ce;
+       struct hlist_bl_head *block_hash_p;
 
        bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
                           cache->c_bucket_bits);
+       block_hash_p = &cache->c_block_hash[bucket];
        spin_lock(&mb_cache_spinlock);
-       list_for_each(l, &cache->c_block_hash[bucket]) {
-               ce = list_entry(l, struct mb_cache_entry, e_block_list);
+       hlist_bl_for_each_entry(ce, l, block_hash_p, e_block_list) {
+               mb_assert(ce->e_block_hash_p == block_hash_p);
                if (ce->e_bdev == bdev && ce->e_block == block) {
                        DEFINE_WAIT(wait);
 
190                         finish_wait(&mb_cache_queue, &wait);
191                         ce->e_used += 1 + MB_CACHE_WRITER;
193 -                       if (!__mb_cache_entry_is_hashed(ce)) {
194 +                       if (!__mb_cache_entry_is_block_hashed(ce)) {
195                                 __mb_cache_entry_release_unlock(ce);
196                                 return NULL;
197                         }
@@ -506,12 +535,14 @@ cleanup:
 #if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
 
 static struct mb_cache_entry *
-__mb_cache_entry_find(struct list_head *l, struct list_head *head,
+__mb_cache_entry_find(struct hlist_bl_node *l, struct hlist_bl_head *head,
                      struct block_device *bdev, unsigned int key)
 {
-       while (l != head) {
+       while (l != NULL) {
                struct mb_cache_entry *ce =
-                       list_entry(l, struct mb_cache_entry, e_index.o_list);
+                       hlist_bl_entry(l, struct mb_cache_entry,
+                               e_index.o_list);
+               mb_assert(ce->e_index_hash_p == head);
                if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
                        DEFINE_WAIT(wait);
 
@@ -532,7 +563,7 @@ __mb_cache_entry_find(struct list_head *l, struct list_head *head,
                        }
                        finish_wait(&mb_cache_queue, &wait);
 
-                       if (!__mb_cache_entry_is_hashed(ce)) {
+                       if (!__mb_cache_entry_is_block_hashed(ce)) {
                                __mb_cache_entry_release_unlock(ce);
                                spin_lock(&mb_cache_spinlock);
                                return ERR_PTR(-EAGAIN);
@@ -562,12 +593,16 @@ mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
                          unsigned int key)
 {
        unsigned int bucket = hash_long(key, cache->c_bucket_bits);
-       struct list_head *l;
-       struct mb_cache_entry *ce;
+       struct hlist_bl_node *l;
+       struct mb_cache_entry *ce = NULL;
+       struct hlist_bl_head *index_hash_p;
 
+       index_hash_p = &cache->c_index_hash[bucket];
        spin_lock(&mb_cache_spinlock);
-       l = cache->c_index_hash[bucket].next;
-       ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
+       if (!hlist_bl_empty(index_hash_p)) {
+               l = hlist_bl_first(index_hash_p);
+               ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
+       }
        spin_unlock(&mb_cache_spinlock);
        return ce;
 }
@@ -597,12 +632,16 @@ mb_cache_entry_find_next(struct mb_cache_entry *prev,
 {
        struct mb_cache *cache = prev->e_cache;
        unsigned int bucket = hash_long(key, cache->c_bucket_bits);
-       struct list_head *l;
+       struct hlist_bl_node *l;
        struct mb_cache_entry *ce;
+       struct hlist_bl_head *index_hash_p;
 
+       index_hash_p = &cache->c_index_hash[bucket];
+       mb_assert(prev->e_index_hash_p == index_hash_p);
        spin_lock(&mb_cache_spinlock);
+       mb_assert(!hlist_bl_empty(index_hash_p));
        l = prev->e_index.o_list.next;
-       ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
+       ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
        __mb_cache_entry_release_unlock(prev);
        return ce;
 }
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index 5525d37..6a392e7 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -3,19 +3,21 @@
 
   (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */
-
 struct mb_cache_entry {
        struct list_head                e_lru_list;
        struct mb_cache                 *e_cache;
        unsigned short                  e_used;
        unsigned short                  e_queued;
+       atomic_t                        e_refcnt;
        struct block_device             *e_bdev;
        sector_t                        e_block;
-       struct list_head                e_block_list;
+       struct hlist_bl_node            e_block_list;
        struct {
-               struct list_head        o_list;
+               struct hlist_bl_node    o_list;
                unsigned int            o_key;
        } e_index;
+       struct hlist_bl_head            *e_block_hash_p;
+       struct hlist_bl_head            *e_index_hash_p;
 };
 
 struct mb_cache {
@@ -25,8 +27,8 @@ struct mb_cache {
        int                             c_max_entries;
        int                             c_bucket_bits;
        struct kmem_cache               *c_entry_cache;
-       struct list_head                *c_block_hash;
-       struct list_head                *c_index_hash;
+       struct hlist_bl_head            *c_block_hash;
+       struct hlist_bl_head            *c_index_hash;
 };
 
 /* Functions on caches */
-- 
1.7.11.3