mbcache: get rid of _e_hash_list_head

From: Andreas Gruenbacher <agruenba@redhat.com>

Get rid of field _e_hash_list_head in cache entries and add bit field
e_referenced instead.

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
---
 fs/mbcache.c            | 41 ++++++++++-------------------------------
 include/linux/mbcache.h |  8 ++------
 2 files changed, 12 insertions(+), 37 deletions(-)
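
[Editor's note, not part of the commit: the old code folded the "referenced"
flag into bit 0 of the pointer-sized _e_hash_list_head field, relying on the
hash-list head pointer being at least two-byte aligned, and masked the bit off
to recover the pointer; the new code keeps a separate one-bit e_referenced
field and recomputes the head from e_key via hash_32(). Below is a minimal
user-space sketch contrasting the two schemes. The types and names (fake_head,
old_entry, new_entry) are made up for illustration and are not the kernel
structures.]

/*
 * Illustrative user-space sketch only: pointer tagging in bit 0 vs. a
 * dedicated bit field. Compile with any C99 compiler.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_head { int dummy; };        /* stand-in for struct hlist_bl_head */

/* Old scheme: head pointer and referenced flag share one unsigned long. */
struct old_entry {
        unsigned long hash_list_head;   /* head pointer with flag in bit 0 */
};

static struct fake_head *old_entry_head(struct old_entry *e)
{
        return (struct fake_head *)(e->hash_list_head & ~1UL); /* mask flag */
}

static int old_entry_referenced(struct old_entry *e)
{
        return e->hash_list_head & 1;
}

/* New scheme: the flag gets its own bit field; the head is recomputed
 * from the key instead of being cached in the entry. */
struct new_entry {
        uint32_t key;
        uint32_t referenced:1;
};

int main(void)
{
        struct fake_head h;
        struct old_entry oe = { .hash_list_head = (unsigned long)&h | 1 };
        struct new_entry ne = { .key = 42, .referenced = 1 };

        printf("old: head=%p referenced=%d\n",
               (void *)old_entry_head(&oe), old_entry_referenced(&oe));
        printf("new: key=%u referenced=%u\n",
               (unsigned)ne.key, (unsigned)ne.referenced);
        return 0;
}

[The trade-off the patch makes is one hash_32() call wherever the head used to
be read back from the entry, in exchange for dropping an unsigned long from
every cache entry; the one-bit field most likely packs into existing padding
after e_key.]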

diff --git a/fs/mbcache.c b/fs/mbcache.c
index 4241b633f155..903be151dcfe 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -45,27 +45,10 @@ static struct kmem_cache *mb_entry_cache;
 static unsigned long mb_cache_shrink(struct mb_cache *cache,
                                     unsigned int nr_to_scan);
 
-static inline bool mb_cache_entry_referenced(struct mb_cache_entry *entry)
+static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
+                                                       u32 key)
 {
-       return entry->_e_hash_list_head & 1;
-}
-
-static inline void mb_cache_entry_set_referenced(struct mb_cache_entry *entry)
-{
-       entry->_e_hash_list_head |= 1;
-}
-
-static inline void mb_cache_entry_clear_referenced(
-                                       struct mb_cache_entry *entry)
-{
-       entry->_e_hash_list_head &= ~1;
-}
-
-static inline struct hlist_bl_head *mb_cache_entry_head(
-                                       struct mb_cache_entry *entry)
-{
-       return (struct hlist_bl_head *)
-                       (entry->_e_hash_list_head & ~1);
+       return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
 }
 
 /*
@@ -108,8 +91,7 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
        atomic_set(&entry->e_refcnt, 1);
        entry->e_key = key;
        entry->e_block = block;
-       head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
-       entry->_e_hash_list_head = (unsigned long)head;
+       head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
                if (dup->e_key == key && dup->e_block == block) {
@@ -146,10 +128,7 @@ static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
        struct hlist_bl_node *node;
        struct hlist_bl_head *head;
 
-       if (entry)
-               head = mb_cache_entry_head(entry);
-       else
-               head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
+       head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
                node = entry->e_hash_list.next;
@@ -219,7 +198,7 @@ void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
        struct hlist_bl_head *head;
        struct mb_cache_entry *entry;
 
-       head = &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
+       head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
                if (entry->e_key == key && entry->e_block == block) {
@@ -250,7 +229,7 @@ EXPORT_SYMBOL(mb_cache_entry_delete_block);
 void mb_cache_entry_touch(struct mb_cache *cache,
                          struct mb_cache_entry *entry)
 {
-       mb_cache_entry_set_referenced(entry);
+       entry->e_referenced = 1;
 }
 EXPORT_SYMBOL(mb_cache_entry_touch);
 
@@ -275,8 +254,8 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
        while (nr_to_scan-- && !list_empty(&cache->c_list)) {
                entry = list_first_entry(&cache->c_list,
                                         struct mb_cache_entry, e_list);
-               if (mb_cache_entry_referenced(entry)) {
-                       mb_cache_entry_clear_referenced(entry);
+               if (entry->e_referenced) {
+                       entry->e_referenced = 0;
                        list_move_tail(&cache->c_list, &entry->e_list);
                        continue;
                }
@@ -287,7 +266,7 @@ static unsigned long mb_cache_shrink(struct mb_cache *cache,
                 * from under us.
                 */
                spin_unlock(&cache->c_list_lock);
-               head = mb_cache_entry_head(entry);
+               head = mb_cache_entry_head(cache, entry->e_key);
                hlist_bl_lock(head);
                if (!hlist_bl_unhashed(&entry->e_hash_list)) {
                        hlist_bl_del_init(&entry->e_hash_list);
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
index a74a1f3082fb..607e6968542e 100644
--- a/include/linux/mbcache.h
+++ b/include/linux/mbcache.h
@@ -12,18 +12,14 @@ struct mb_cache;
 struct mb_cache_entry {
        /* List of entries in cache - protected by cache->c_list_lock */
        struct list_head        e_list;
-       /* Hash table list - protected by bitlock in e_hash_list_head */
+       /* Hash table list - protected by hash chain bitlock */
        struct hlist_bl_node    e_hash_list;
        atomic_t                e_refcnt;
        /* Key in hash - stable during lifetime of the entry */
        u32                     e_key;
+       u32                     e_referenced:1;
        /* Block number of hashed block - stable during lifetime of the entry */
        sector_t                e_block;
-       /*
-        * Head of hash list (for list bit lock) - stable. Combined with
-        * referenced bit of entry
-        */
-       unsigned long           _e_hash_list_head;
 };
 
 struct mb_cache *mb_cache_create(int bucket_bits);
-- 
2.6.2