1 mbcache: make mbcache naming more generic
3 From: Tahsin Erdogan <tahsin@google.com>
5 Make names more generic so that mbcache usage is not limited to
6 block sharing. In a subsequent patch in the series
7 ("ext4: xattr inode deduplication"), we start using the mbcache code
8 for sharing xattr inodes. With that patch, the old mb_cache_entry.e_block
9 field could hold either a block number or an inode number.
11 Signed-off-by: Tahsin Erdogan <tahsin@google.com>
12 Signed-off-by: Theodore Ts'o <tytso@mit.edu>
14 v3: removed space after typecast
16 v2: updated commit title and description
18 fs/ext2/xattr.c | 18 +++++++++---------
19 fs/ext4/xattr.c | 10 +++++-----
20 fs/mbcache.c | 43 +++++++++++++++++++++----------------------
21 include/linux/mbcache.h | 11 +++++------
22 4 files changed, 40 insertions(+), 42 deletions(-)
24 diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
25 index fbdb8f171893..1e5f76070580 100644
28 @@ -493,8 +493,8 @@ bad_block: ext2_error(sb, "ext2_xattr_set",
29 * This must happen under buffer lock for
30 * ext2_xattr_set2() to reliably detect modified block
32 - mb_cache_entry_delete_block(EXT2_SB(sb)->s_mb_cache,
33 - hash, bh->b_blocknr);
34 + mb_cache_entry_delete(EXT2_SB(sb)->s_mb_cache, hash,
37 /* keep the buffer locked while modifying it. */
39 @@ -721,8 +721,8 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
40 * This must happen under buffer lock for
41 * ext2_xattr_set2() to reliably detect freed block
43 - mb_cache_entry_delete_block(ext2_mb_cache,
44 - hash, old_bh->b_blocknr);
45 + mb_cache_entry_delete(ext2_mb_cache, hash,
47 /* Free the old block. */
48 ea_bdebug(old_bh, "freeing");
49 ext2_free_blocks(inode, old_bh->b_blocknr, 1);
50 @@ -795,8 +795,8 @@ ext2_xattr_delete_inode(struct inode *inode)
51 * This must happen under buffer lock for ext2_xattr_set2() to
52 * reliably detect freed block
54 - mb_cache_entry_delete_block(EXT2_SB(inode->i_sb)->s_mb_cache,
55 - hash, bh->b_blocknr);
56 + mb_cache_entry_delete(EXT2_SB(inode->i_sb)->s_mb_cache, hash,
58 ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
61 @@ -907,11 +907,11 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
63 struct buffer_head *bh;
65 - bh = sb_bread(inode->i_sb, ce->e_block);
66 + bh = sb_bread(inode->i_sb, ce->e_value);
68 ext2_error(inode->i_sb, "ext2_xattr_cache_find",
69 "inode %ld: block %ld read error",
70 - inode->i_ino, (unsigned long) ce->e_block);
71 + inode->i_ino, (unsigned long) ce->e_value);
75 @@ -931,7 +931,7 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
76 } else if (le32_to_cpu(HDR(bh)->h_refcount) >
77 EXT2_XATTR_REFCOUNT_MAX) {
78 ea_idebug(inode, "block %ld refcount %d>%d",
79 - (unsigned long) ce->e_block,
80 + (unsigned long) ce->e_value,
81 le32_to_cpu(HDR(bh)->h_refcount),
82 EXT2_XATTR_REFCOUNT_MAX);
83 } else if (!ext2_xattr_cmp(header, HDR(bh))) {
84 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
85 index c09fcffb0878..fb437efa8688 100644
88 @@ -678,7 +678,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
89 * This must happen under buffer lock for
90 * ext4_xattr_block_set() to reliably detect freed block
92 - mb_cache_entry_delete_block(ext4_mb_cache, hash, bh->b_blocknr);
93 + mb_cache_entry_delete(ext4_mb_cache, hash, bh->b_blocknr);
96 ext4_free_blocks(handle, inode, bh, 0, 1,
97 @@ -1115,8 +1115,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
98 * ext4_xattr_block_set() to reliably detect modified
101 - mb_cache_entry_delete_block(ext4_mb_cache, hash,
102 - bs->bh->b_blocknr);
103 + mb_cache_entry_delete(ext4_mb_cache, hash,
104 + bs->bh->b_blocknr);
105 ea_bdebug(bs->bh, "modifying in-place");
106 error = ext4_xattr_set_entry(i, s, handle, inode);
108 @@ -2238,10 +2238,10 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
110 struct buffer_head *bh;
112 - bh = sb_bread(inode->i_sb, ce->e_block);
113 + bh = sb_bread(inode->i_sb, ce->e_value);
115 EXT4_ERROR_INODE(inode, "block %lu read error",
116 - (unsigned long) ce->e_block);
117 + (unsigned long)ce->e_value);
118 } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
121 diff --git a/fs/mbcache.c b/fs/mbcache.c
122 index b19be429d655..45a8d52dc991 100644
127 * Mbcache is a simple key-value store. Keys need not be unique, however
128 * key-value pairs are expected to be unique (we use this fact in
129 - * mb_cache_entry_delete_block()).
130 + * mb_cache_entry_delete()).
132 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
133 * They use hash of a block contents as a key and block number as a value.
134 @@ -62,15 +62,15 @@ static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
135 * @cache - cache where the entry should be created
136 * @mask - gfp mask with which the entry should be allocated
137 * @key - key of the entry
138 - * @block - block that contains data
139 - * @reusable - is the block reusable by other inodes?
140 + * @value - value of the entry
141 + * @reusable - is the entry reusable by others?
143 - * Creates entry in @cache with key @key and records that data is stored in
144 - * block @block. The function returns -EBUSY if entry with the same key
145 - * and for the same block already exists in cache. Otherwise 0 is returned.
146 + * Creates entry in @cache with key @key and value @value. The function returns
147 + * -EBUSY if entry with the same key and value already exists in cache.
148 + * Otherwise 0 is returned.
150 int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
151 - sector_t block, bool reusable)
152 + u64 value, bool reusable)
154 struct mb_cache_entry *entry, *dup;
155 struct hlist_bl_node *dup_node;
156 @@ -91,12 +91,12 @@ int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
157 /* One ref for hash, one ref returned */
158 atomic_set(&entry->e_refcnt, 1);
160 - entry->e_block = block;
161 + entry->e_value = value;
162 entry->e_reusable = reusable;
163 head = mb_cache_entry_head(cache, key);
165 hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
166 - if (dup->e_key == key && dup->e_block == block) {
167 + if (dup->e_key == key && dup->e_value == value) {
168 hlist_bl_unlock(head);
169 kmem_cache_free(mb_entry_cache, entry);
171 @@ -187,13 +187,13 @@ struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
172 EXPORT_SYMBOL(mb_cache_entry_find_next);
175 - * mb_cache_entry_get - get a cache entry by block number (and key)
176 + * mb_cache_entry_get - get a cache entry by value (and key)
177 * @cache - cache we work with
178 - * @key - key of block number @block
179 - * @block - block number
183 struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
187 struct hlist_bl_node *node;
188 struct hlist_bl_head *head;
189 @@ -202,7 +202,7 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
190 head = mb_cache_entry_head(cache, key);
192 hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
193 - if (entry->e_key == key && entry->e_block == block) {
194 + if (entry->e_key == key && entry->e_value == value) {
195 atomic_inc(&entry->e_refcnt);
198 @@ -214,15 +214,14 @@ struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
200 EXPORT_SYMBOL(mb_cache_entry_get);
202 -/* mb_cache_entry_delete_block - remove information about block from cache
203 +/* mb_cache_entry_delete - remove a cache entry
204 * @cache - cache we work with
205 - * @key - key of block @block
206 - * @block - block number
210 - * Remove entry from cache @cache with key @key with data stored in @block.
211 + * Remove entry from cache @cache with key @key and value @value.
213 -void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
215 +void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
217 struct hlist_bl_node *node;
218 struct hlist_bl_head *head;
219 @@ -231,7 +230,7 @@ void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
220 head = mb_cache_entry_head(cache, key);
222 hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
223 - if (entry->e_key == key && entry->e_block == block) {
224 + if (entry->e_key == key && entry->e_value == value) {
225 /* We keep hash list reference to keep entry alive */
226 hlist_bl_del_init(&entry->e_hash_list);
227 hlist_bl_unlock(head);
228 @@ -248,7 +247,7 @@ void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
230 hlist_bl_unlock(head);
232 -EXPORT_SYMBOL(mb_cache_entry_delete_block);
233 +EXPORT_SYMBOL(mb_cache_entry_delete);
235 /* mb_cache_entry_touch - cache entry got used
236 * @cache - cache the entry belongs to
237 diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
238 index 86c9a8b480c5..e1bc73414983 100644
239 --- a/include/linux/mbcache.h
240 +++ b/include/linux/mbcache.h
241 @@ -19,15 +19,15 @@ struct mb_cache_entry {
245 - /* Block number of hashed block - stable during lifetime of the entry */
247 + /* User provided value - stable during lifetime of the entry */
251 struct mb_cache *mb_cache_create(int bucket_bits);
252 void mb_cache_destroy(struct mb_cache *cache);
254 int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
255 - sector_t block, bool reusable);
256 + u64 value, bool reusable);
257 void __mb_cache_entry_free(struct mb_cache_entry *entry);
258 static inline int mb_cache_entry_put(struct mb_cache *cache,
259 struct mb_cache_entry *entry)
260 @@ -38,10 +38,9 @@ static inline int mb_cache_entry_put(struct mb_cache *cache,
264 -void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
266 +void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value);
267 struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
270 struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
272 struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
274 2.13.1.611.g7e3b11ae1-goog