/*
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */
/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * and is not in the lru list.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
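/*
 * A minimal usage sketch (illustration only, not taken from a real caller,
 * with error handling omitted; it assumes MB_CACHE_INDEXES_COUNT is not
 * hardwired to a nonzero value, so a cache without additional indexes can
 * be created):
 *
 *	struct mb_cache *cache;
 *	struct mb_cache_entry *ce;
 *
 *	cache = mb_cache_create("example", NULL,
 *				sizeof(struct mb_cache_entry), 0, 6);
 *	... entries are added with mb_cache_entry_alloc() +
 *	    mb_cache_entry_insert() ...
 *	ce = mb_cache_entry_get(cache, bdev, block);
 *	if (ce) {
 *		... use the entry; it is held until ...
 *		mb_cache_entry_release(ce);
 *	}
 *	mb_cache_destroy(cache);
 */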
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>
#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
#define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while(0)
#else
# define mb_debug(f...) do { } while(0)
# define mb_assert(c) do { } while(0)
#endif
#define mb_error(f...) do { \
		printk(KERN_ERR f); \
		printk("\n"); \
	} while(0)
MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif
/*
 * Global data: list of all mbcache's, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static spinlock_t mb_cache_spinlock = SPIN_LOCK_UNLOCKED;
static struct shrinker *mb_shrinker;
static inline int
mb_cache_indexes(struct mb_cache *cache)
{
#ifdef MB_CACHE_INDEXES_COUNT
	return MB_CACHE_INDEXES_COUNT;
#else
	return cache->c_indexes_count;
#endif
}
/*
 * What the mbcache registers as to get shrunk dynamically.
 */
static int mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask);
static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
	return !list_empty(&ce->e_block_list);
}
static inline void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
	int n;

	if (__mb_cache_entry_is_hashed(ce)) {
		list_del_init(&ce->e_block_list);
		for (n = 0; n < mb_cache_indexes(ce->e_cache); n++)
			list_del(&ce->e_indexes[n].o_list);
	}
}
static inline void
__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(atomic_read(&ce->e_used) == 0);
	if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
		/* free failed -- put back on the lru list
		   for freeing later. */
		spin_lock(&mb_cache_spinlock);
		list_add(&ce->e_lru_list, &mb_cache_lru_list);
		spin_unlock(&mb_cache_spinlock);
	} else {
		kmem_cache_free(cache->c_entry_cache, ce);
		atomic_dec(&cache->c_entry_count);
	}
}
/* Called with mb_cache_spinlock held; drops the lock before returning. */
static inline void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
{
	if (atomic_dec_and_test(&ce->e_used)) {
		if (!__mb_cache_entry_is_hashed(ce))
			goto forget;
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
	}
	spin_unlock(&mb_cache_spinlock);
	return;

forget:
	spin_unlock(&mb_cache_spinlock);
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}
/*
 * mb_cache_shrink_fn()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @nr_to_scan: Number of objects to scan
 * @gfp_mask: (ignored)
 *
 * Returns the number of objects which are present in the cache.
 */
static int
mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;
	int count = 0;

	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &mb_cache_list) {
		struct mb_cache *cache =
			list_entry(l, struct mb_cache, c_cache_list);
		mb_debug("cache %s (%d)", cache->c_name,
			 atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	mb_debug("trying to free %d entries", nr_to_scan);
	if (nr_to_scan == 0) {
		spin_unlock(&mb_cache_spinlock);
		goto out;
	}
	/* Unhash and collect the oldest unused entries under the lock,
	   then free them outside the lock. */
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), gfp_mask);
	}
out:
	return count;
}
/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are equal size. Cache entries may be from
 * multiple devices. If this is the first mbcache created, registers
 * the cache with kernel memory management. Returns NULL if no more
 * memory was available.
 *
 * @name: name of the cache (informal)
 * @cache_op: contains the callback called when freeing a cache entry
 * @entry_size: The size of a cache entry, including
 *              struct mb_cache_entry
 * @indexes_count: number of additional indexes in the cache. Must equal
 *                 MB_CACHE_INDEXES_COUNT if the number of indexes is
 *                 hardwired
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, struct mb_cache_op *cache_op,
		size_t entry_size, int indexes_count, int bucket_bits)
{
	int m = 0, n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	if (entry_size < sizeof(struct mb_cache_entry) +
	    indexes_count * sizeof(struct mb_cache_entry_index))
		return NULL;

	cache = kmalloc(sizeof(struct mb_cache) +
			indexes_count * sizeof(struct list_head), GFP_KERNEL);
	if (!cache)
		goto fail;
	cache->c_name = name;
	cache->c_op.free = NULL;
	if (cache_op)
		cache->c_op.free = cache_op->free;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
#ifdef MB_CACHE_INDEXES_COUNT
	mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
#else
	cache->c_indexes_count = indexes_count;
#endif
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	for (m = 0; m < indexes_count; m++) {
		cache->c_indexes_hash[m] = kmalloc(bucket_count *
						   sizeof(struct list_head),
						   GFP_KERNEL);
		if (!cache->c_indexes_hash[m])
			goto fail;
		for (n = 0; n < bucket_count; n++)
			INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
	}
	cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
		SLAB_RECLAIM_ACCOUNT, NULL, NULL);
	if (!cache->c_entry_cache)
		goto fail;

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail:
	if (cache) {
		while (--m >= 0)
			kfree(cache->c_indexes_hash[m]);
		if (cache->c_block_hash)
			kfree(cache->c_block_hash);
		kfree(cache);
	}
	return NULL;
}
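/*
 * Example (illustration only; the name below is made up, not a real
 * caller): a cache for extended attribute blocks with one additional
 * index keyed by a hash of the block contents and 2^6 = 64 hash buckets
 * could be created like this, assuming MB_CACHE_INDEXES_COUNT is either
 * undefined or hardwired to 1:
 *
 *	static struct mb_cache *example_xattr_cache;
 *
 *	example_xattr_cache = mb_cache_create("example_xattr", NULL,
 *			sizeof(struct mb_cache_entry) +
 *			sizeof(struct mb_cache_entry_index), 1, 6);
 */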
/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. All cache entries
 * currently in use cannot be freed, and thus remain in the cache. All others
 * are freed.
 *
 * @cache: which cache to shrink
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct mb_cache *cache, struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}
}
/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it. If this was the last mbcache, un-registers the
 * mbcache from kernel memory management.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;
	int n;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			 cache->c_name,
			 atomic_read(&cache->c_entry_count));
	}

	kmem_cache_destroy(cache->c_entry_cache);

	for (n = 0; n < mb_cache_indexes(cache); n++)
		kfree(cache->c_indexes_hash[n]);
	kfree(cache->c_block_hash);

	kfree(cache);
}
/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache)
{
	struct mb_cache_entry *ce;

	atomic_inc(&cache->c_entry_count);
	ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL);
	if (ce) {
		INIT_LIST_HEAD(&ce->e_lru_list);
		INIT_LIST_HEAD(&ce->e_block_list);
		ce->e_cache = cache;
		atomic_set(&ce->e_used, 1);
	}
	return ce;
}
/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block exists
 * already (this may happen after a failed lookup, but when another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @keys: array of additional keys. There must be indexes_count entries
 *        in the array (as specified when creating the cache).
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int keys[])
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct list_head *l;
	int error = -EBUSY, n;

	/* The block hash bucket is derived from the device pointer and the
	   low 32 bits of the block number. */
	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block)
			goto out;
	}
	__mb_cache_entry_unhash(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	for (n = 0; n < mb_cache_indexes(cache); n++) {
		ce->e_indexes[n].o_key = keys[n];
		bucket = hash_long(keys[n], cache->c_bucket_bits);
		list_add(&ce->e_indexes[n].o_list,
			 &cache->c_indexes_hash[n][bucket]);
	}
	error = 0;
out:
	spin_unlock(&mb_cache_spinlock);
	return error;
}
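/*
 * Typical caller pattern (illustration only, not taken from a real
 * caller): after a lookup misses, allocate and fill a new entry, then
 * try to insert it. -EBUSY only means another process inserted the same
 * device/block first; since the losing entry was never hashed, releasing
 * the handle frees it, so the handle is dropped the same way in both
 * cases:
 *
 *	ce = mb_cache_entry_alloc(cache);
 *	if (ce) {
 *		if (mb_cache_entry_insert(ce, bdev, block, keys) == -EBUSY)
 *			;	(lost the race; entry is freed on release)
 *		mb_cache_entry_release(ce);
 *	}
 */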
/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	__mb_cache_entry_release_unlock(ce);
}
/*
 * mb_cache_entry_free()
 *
 * This is equivalent to invalidating the entry (taking it out of the
 * hashes) and then releasing the handle with mb_cache_entry_release().
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_unhash(ce);
	__mb_cache_entry_release_unlock(ce);
}
/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists.
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct list_head *l;
	struct mb_cache_entry *ce;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &cache->c_block_hash[bucket]) {
		ce = list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);
			atomic_inc(&ce->e_used);
			goto cleanup;
		}
	}
	ce = NULL;

cleanup:
	spin_unlock(&mb_cache_spinlock);
	return ce;
}
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
		      int index, struct block_device *bdev, unsigned int key)
{
	while (l != head) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry,
				   e_indexes[index].o_list);
		if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);
			atomic_inc(&ce->e_used);
			return ce;
		}
		l = l->next;
	}
	return NULL;
}
/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found.
 *
 * @cache: the cache to search
 * @index: the number of the additional index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, int index,
			  struct block_device *bdev, unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	mb_assert(index < mb_cache_indexes(cache));
	spin_lock(&mb_cache_spinlock);
	l = cache->c_indexes_hash[index][bucket].next;
	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
				   index, bdev, key);
	spin_unlock(&mb_cache_spinlock);
	return ce;
}
/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 *	...
 *	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @index: the number of the additional index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	mb_assert(index < mb_cache_indexes(cache));
	spin_lock(&mb_cache_spinlock);
	l = prev->e_indexes[index].o_list.next;
	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
				   index, bdev, key);
	__mb_cache_entry_release_unlock(prev);
	return ce;
}
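/*
 * Concrete form of the loop above (illustration only): visiting every
 * entry on bdev whose key in additional index 0 equals `key':
 *
 *	ce = mb_cache_entry_find_first(cache, 0, bdev, key);
 *	while (ce) {
 *		... use ce->e_block ...
 *		ce = mb_cache_entry_find_next(ce, 0, bdev, key);
 *	}
 *
 * A caller that breaks out of the loop early still holds a handle to the
 * current entry and must drop it with mb_cache_entry_release().
 */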
#endif	/* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */
static int __init init_mbcache(void)
{
	mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
	return 0;
}

static void __exit exit_mbcache(void)
{
	remove_shrinker(mb_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)