mbcache: remove mbcache

From: Jan Kara <jack@suse.cz>

Both ext2 and ext4 are now converted to mbcache2. Remove the old mbcache
code.
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
---
 fs/Makefile             |   2 +-
 fs/mbcache.c            | 858 ------------------------------------------------
 include/linux/mbcache.h |  55 ----
 3 files changed, 1 insertion(+), 914 deletions(-)
 delete mode 100644 fs/mbcache.c
 delete mode 100644 include/linux/mbcache.h
diff --git a/fs/Makefile b/fs/Makefile
index 15b3d6c4e46a..59b844007fbc 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -41,7 +41,7 @@ obj-$(CONFIG_COMPAT_BINFMT_ELF) += compat_binfmt_elf.o
 obj-$(CONFIG_BINFMT_ELF_FDPIC) += binfmt_elf_fdpic.o
 obj-$(CONFIG_BINFMT_FLAT) += binfmt_flat.o
 
-obj-$(CONFIG_FS_MBCACHE) += mbcache.o mbcache2.o
+obj-$(CONFIG_FS_MBCACHE) += mbcache2.o
 obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.o
 obj-$(CONFIG_NFS_COMMON) += nfs_common/
 obj-$(CONFIG_COREDUMP) += coredump.o
diff --git a/fs/mbcache.c b/fs/mbcache.c
deleted file mode 100644
index 187477ded6b3..000000000000
--- a/fs/mbcache.c
+++ /dev/null
@@ -1,858 +0,0 @@
-/*
- * linux/fs/mbcache.c
- * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
- * Filesystem Meta Information Block Cache (mbcache)
- * The mbcache caches blocks of block devices that need to be located
- * by their device/block number, as well as by other criteria (such
- * as the block's contents).
- * There can only be one cache entry in a cache per device and block number.
- * Additional indexes need not be unique in this sense. The number of
- * additional indexes (=other criteria) can be hardwired at compile time
- * or specified at cache create time.
- * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
- * in the cache. A valid entry is in the main hash tables of the cache,
- * and may also be in the lru list. An invalid entry is not in any hashes
- * A valid cache entry is only in the lru list if no handles refer to it.
- * Invalid cache entries will be freed when the last handle to the cache
- * entry is released. Entries that cannot be freed immediately are put
- * back on the lru list.
- * Lock descriptions and usage:
- * Each hash chain of both the block and index hash tables now contains
- * a built-in lock used to serialize accesses to the hash chain.
- * Accesses to global data structures mb_cache_list and mb_cache_lru_list
- * are serialized via the global spinlock mb_cache_spinlock.
- * Each mb_cache_entry contains a spinlock, e_entry_lock, to serialize
- * accesses to its local data, such as e_used and e_queued.
- * Each block hash chain's lock has the highest lock order, followed by an
- * index hash chain's lock, mb_cache_bg_lock (used to implement mb_cache_entry's
- * lock), and mb_cach_spinlock, with the lowest order. While holding
- * either a block or index hash chain lock, a thread can acquire an
- * mc_cache_bg_lock, which in turn can also acquire mb_cache_spinlock.
- * Since both mb_cache_entry_get and mb_cache_entry_find scan the block and
- * index hash chian, it needs to lock the corresponding hash chain. For each
- * mb_cache_entry within the chain, it needs to lock the mb_cache_entry to
- * prevent either any simultaneous release or free on the entry and also
- * to serialize accesses to either the e_used or e_queued member of the entry.
- * To avoid having a dangling reference to an already freed
- * mb_cache_entry, an mb_cache_entry is only freed when it is not on a
- * block hash chain and also no longer being referenced, both e_used,
- * and e_queued are 0's. When an mb_cache_entry is explicitly freed it is
- * first removed from a block hash chain.
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/hash.h>
-#include <linux/fs.h>
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/list_bl.h>
-#include <linux/mbcache.h>
-#include <linux/init.h>
-#include <linux/blockgroup_lock.h>
-#include <linux/log2.h>
-#ifdef MB_CACHE_DEBUG
-# define mb_debug(f...) do { \
- printk(KERN_DEBUG f); \
-#define mb_assert(c) do { if (!(c)) \
- printk(KERN_ERR "assertion " #c " failed\n"); \
-# define mb_debug(f...) do { } while(0)
-# define mb_assert(c) do { } while(0)
-#define mb_error(f...) do { \
- printk(KERN_ERR f); \
-#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
-#define MB_CACHE_ENTRY_LOCK_BITS ilog2(NR_BG_LOCKS)
-#define MB_CACHE_ENTRY_LOCK_INDEX(ce) \
- (hash_long((unsigned long)ce, MB_CACHE_ENTRY_LOCK_BITS))
-static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);
-static struct blockgroup_lock *mb_cache_bg_lock;
-static struct kmem_cache *mb_cache_kmem_cache;
-MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
-MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
-MODULE_LICENSE("GPL");
-EXPORT_SYMBOL(mb_cache_create);
-EXPORT_SYMBOL(mb_cache_shrink);
-EXPORT_SYMBOL(mb_cache_destroy);
-EXPORT_SYMBOL(mb_cache_entry_alloc);
-EXPORT_SYMBOL(mb_cache_entry_insert);
-EXPORT_SYMBOL(mb_cache_entry_release);
-EXPORT_SYMBOL(mb_cache_entry_free);
-EXPORT_SYMBOL(mb_cache_entry_get);
-#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
-EXPORT_SYMBOL(mb_cache_entry_find_first);
-EXPORT_SYMBOL(mb_cache_entry_find_next);
- * Global data: list of all mbcache's, lru list, and a spinlock for
- * accessing cache data structures on SMP machines. The lru list is
- * global across all mbcaches.
-static LIST_HEAD(mb_cache_list);
-static LIST_HEAD(mb_cache_lru_list);
-static DEFINE_SPINLOCK(mb_cache_spinlock);
-__spin_lock_mb_cache_entry(struct mb_cache_entry *ce)
- spin_lock(bgl_lock_ptr(mb_cache_bg_lock,
- MB_CACHE_ENTRY_LOCK_INDEX(ce)));
-__spin_unlock_mb_cache_entry(struct mb_cache_entry *ce)
- spin_unlock(bgl_lock_ptr(mb_cache_bg_lock,
- MB_CACHE_ENTRY_LOCK_INDEX(ce)));
-__mb_cache_entry_is_block_hashed(struct mb_cache_entry *ce)
- return !hlist_bl_unhashed(&ce->e_block_list);
-__mb_cache_entry_unhash_block(struct mb_cache_entry *ce)
- if (__mb_cache_entry_is_block_hashed(ce))
- hlist_bl_del_init(&ce->e_block_list);
-__mb_cache_entry_is_index_hashed(struct mb_cache_entry *ce)
- return !hlist_bl_unhashed(&ce->e_index.o_list);
-__mb_cache_entry_unhash_index(struct mb_cache_entry *ce)
- if (__mb_cache_entry_is_index_hashed(ce))
- hlist_bl_del_init(&ce->e_index.o_list);
- * __mb_cache_entry_unhash_unlock()
- * This function is called to unhash both the block and index hash
- * It assumes both the block and index hash chain is locked upon entry.
- * It also unlock both hash chains both exit
-__mb_cache_entry_unhash_unlock(struct mb_cache_entry *ce)
- __mb_cache_entry_unhash_index(ce);
- hlist_bl_unlock(ce->e_index_hash_p);
- __mb_cache_entry_unhash_block(ce);
- hlist_bl_unlock(ce->e_block_hash_p);
-__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
- struct mb_cache *cache = ce->e_cache;
- mb_assert(!(ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt)));
- kmem_cache_free(cache->c_entry_cache, ce);
- atomic_dec(&cache->c_entry_count);
-__mb_cache_entry_release(struct mb_cache_entry *ce)
- /* First lock the entry to serialize access to its local data. */
- __spin_lock_mb_cache_entry(ce);
- /* Wake up all processes queuing for this cache entry. */
- wake_up_all(&mb_cache_queue);
- if (ce->e_used >= MB_CACHE_WRITER)
- ce->e_used -= MB_CACHE_WRITER;
- * Make sure that all cache entries on lru_list have
- * both e_used and e_qued of 0s.
- if (!(ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))) {
- if (!__mb_cache_entry_is_block_hashed(ce)) {
- __spin_unlock_mb_cache_entry(ce);
- * Need access to lru list, first drop entry lock,
- * then reacquire the lock in the proper order.
- spin_lock(&mb_cache_spinlock);
- if (list_empty(&ce->e_lru_list))
- list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
- spin_unlock(&mb_cache_spinlock);
- __spin_unlock_mb_cache_entry(ce);
- mb_assert(list_empty(&ce->e_lru_list));
- __mb_cache_entry_forget(ce, GFP_KERNEL);
- * mb_cache_shrink_scan() memory pressure callback
- * This function is called by the kernel memory management when memory
- * @shrink: (ignored)
- * @sc: shrink_control passed from reclaim
- * Returns the number of objects freed.
-static unsigned long
-mb_cache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
- LIST_HEAD(free_list);
- struct mb_cache_entry *entry, *tmp;
- int nr_to_scan = sc->nr_to_scan;
- gfp_t gfp_mask = sc->gfp_mask;
- unsigned long freed = 0;
- mb_debug("trying to free %d entries", nr_to_scan);
- spin_lock(&mb_cache_spinlock);
- while ((nr_to_scan-- > 0) && !list_empty(&mb_cache_lru_list)) {
- struct mb_cache_entry *ce =
- list_entry(mb_cache_lru_list.next,
- struct mb_cache_entry, e_lru_list);
- list_del_init(&ce->e_lru_list);
- if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt))
- spin_unlock(&mb_cache_spinlock);
- /* Prevent any find or get operation on the entry */
- hlist_bl_lock(ce->e_block_hash_p);
- hlist_bl_lock(ce->e_index_hash_p);
- /* Ignore if it is touched by a find/get */
- if (ce->e_used || ce->e_queued || atomic_read(&ce->e_refcnt) ||
- !list_empty(&ce->e_lru_list)) {
- hlist_bl_unlock(ce->e_index_hash_p);
- hlist_bl_unlock(ce->e_block_hash_p);
- spin_lock(&mb_cache_spinlock);
- __mb_cache_entry_unhash_unlock(ce);
- list_add_tail(&ce->e_lru_list, &free_list);
- spin_lock(&mb_cache_spinlock);
- spin_unlock(&mb_cache_spinlock);
- list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
- __mb_cache_entry_forget(entry, gfp_mask);
-static unsigned long
-mb_cache_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
- struct mb_cache *cache;
- unsigned long count = 0;
- spin_lock(&mb_cache_spinlock);
- list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
- mb_debug("cache %s (%d)", cache->c_name,
- atomic_read(&cache->c_entry_count));
- count += atomic_read(&cache->c_entry_count);
- spin_unlock(&mb_cache_spinlock);
- return vfs_pressure_ratio(count);
-static struct shrinker mb_cache_shrinker = {
- .count_objects = mb_cache_shrink_count,
- .scan_objects = mb_cache_shrink_scan,
- .seeks = DEFAULT_SEEKS,
- * mb_cache_create() create a new cache
- * All entries in one cache are equal size. Cache entries may be from
- * multiple devices. If this is the first mbcache created, registers
- * the cache with kernel memory management. Returns NULL if no more
- * memory was available.
- * @name: name of the cache (informal)
- * @bucket_bits: log2(number of hash buckets)
-mb_cache_create(const char *name, int bucket_bits)
- int n, bucket_count = 1 << bucket_bits;
- struct mb_cache *cache = NULL;
- if (!mb_cache_bg_lock) {
- mb_cache_bg_lock = kmalloc(sizeof(struct blockgroup_lock),
- if (!mb_cache_bg_lock)
- bgl_lock_init(mb_cache_bg_lock);
- cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
- cache->c_name = name;
- atomic_set(&cache->c_entry_count, 0);
- cache->c_bucket_bits = bucket_bits;
- cache->c_block_hash = kmalloc(bucket_count *
- sizeof(struct hlist_bl_head), GFP_KERNEL);
- if (!cache->c_block_hash)
- for (n=0; n<bucket_count; n++)
- INIT_HLIST_BL_HEAD(&cache->c_block_hash[n]);
- cache->c_index_hash = kmalloc(bucket_count *
- sizeof(struct hlist_bl_head), GFP_KERNEL);
- if (!cache->c_index_hash)
- for (n=0; n<bucket_count; n++)
- INIT_HLIST_BL_HEAD(&cache->c_index_hash[n]);
- if (!mb_cache_kmem_cache) {
- mb_cache_kmem_cache = kmem_cache_create(name,
- sizeof(struct mb_cache_entry), 0,
- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
- if (!mb_cache_kmem_cache)
- cache->c_entry_cache = mb_cache_kmem_cache;
- * Set an upper limit on the number of cache entries so that the hash
- * chains won't grow too long.
- cache->c_max_entries = bucket_count << 4;
- spin_lock(&mb_cache_spinlock);
- list_add(&cache->c_cache_list, &mb_cache_list);
- spin_unlock(&mb_cache_spinlock);
- kfree(cache->c_index_hash);
- kfree(cache->c_block_hash);
- * mb_cache_shrink()
- * Removes all cache entries of a device from the cache. All cache entries
- * currently in use cannot be freed, and thus remain in the cache. All others
- * @bdev: which device's cache entries to shrink
-mb_cache_shrink(struct block_device *bdev)
- LIST_HEAD(free_list);
- struct list_head *l;
- struct mb_cache_entry *ce, *tmp;
- l = &mb_cache_lru_list;
- spin_lock(&mb_cache_spinlock);
- while (!list_is_last(l, &mb_cache_lru_list)) {
- ce = list_entry(l, struct mb_cache_entry, e_lru_list);
- if (ce->e_bdev == bdev) {
- list_del_init(&ce->e_lru_list);
- if (ce->e_used || ce->e_queued ||
- atomic_read(&ce->e_refcnt))
- spin_unlock(&mb_cache_spinlock);
- * Prevent any find or get operation on the entry.
- hlist_bl_lock(ce->e_block_hash_p);
- hlist_bl_lock(ce->e_index_hash_p);
- /* Ignore if it is touched by a find/get */
- if (ce->e_used || ce->e_queued ||
- atomic_read(&ce->e_refcnt) ||
- !list_empty(&ce->e_lru_list)) {
- hlist_bl_unlock(ce->e_index_hash_p);
- hlist_bl_unlock(ce->e_block_hash_p);
- l = &mb_cache_lru_list;
- spin_lock(&mb_cache_spinlock);
- __mb_cache_entry_unhash_unlock(ce);
- mb_assert(!(ce->e_used || ce->e_queued ||
- atomic_read(&ce->e_refcnt)));
- list_add_tail(&ce->e_lru_list, &free_list);
- l = &mb_cache_lru_list;
- spin_lock(&mb_cache_spinlock);
- spin_unlock(&mb_cache_spinlock);
- list_for_each_entry_safe(ce, tmp, &free_list, e_lru_list) {
- __mb_cache_entry_forget(ce, GFP_KERNEL);
- * mb_cache_destroy()
- * Shrinks the cache to its minimum possible size (hopefully 0 entries),
- * and then destroys it. If this was the last mbcache, un-registers the
- * mbcache from kernel memory management.
-mb_cache_destroy(struct mb_cache *cache)
- LIST_HEAD(free_list);
- struct mb_cache_entry *ce, *tmp;
- spin_lock(&mb_cache_spinlock);
- list_for_each_entry_safe(ce, tmp, &mb_cache_lru_list, e_lru_list) {
- if (ce->e_cache == cache)
- list_move_tail(&ce->e_lru_list, &free_list);
- list_del(&cache->c_cache_list);
- spin_unlock(&mb_cache_spinlock);
- list_for_each_entry_safe(ce, tmp, &free_list, e_lru_list) {
- list_del_init(&ce->e_lru_list);
- * Prevent any find or get operation on the entry.
- hlist_bl_lock(ce->e_block_hash_p);
- hlist_bl_lock(ce->e_index_hash_p);
- mb_assert(!(ce->e_used || ce->e_queued ||
- atomic_read(&ce->e_refcnt)));
- __mb_cache_entry_unhash_unlock(ce);
- __mb_cache_entry_forget(ce, GFP_KERNEL);
- if (atomic_read(&cache->c_entry_count) > 0) {
- mb_error("cache %s: %d orphaned entries",
- atomic_read(&cache->c_entry_count));
- if (list_empty(&mb_cache_list)) {
- kmem_cache_destroy(mb_cache_kmem_cache);
- mb_cache_kmem_cache = NULL;
- kfree(cache->c_index_hash);
- kfree(cache->c_block_hash);
- * mb_cache_entry_alloc()
- * Allocates a new cache entry. The new entry will not be valid initially,
- * and thus cannot be looked up yet. It should be filled with data, and
- * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
- * if no more memory was available.
-struct mb_cache_entry *
-mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
- struct mb_cache_entry *ce;
- if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
- struct list_head *l;
- l = &mb_cache_lru_list;
- spin_lock(&mb_cache_spinlock);
- while (!list_is_last(l, &mb_cache_lru_list)) {
- ce = list_entry(l, struct mb_cache_entry, e_lru_list);
- if (ce->e_cache == cache) {
- list_del_init(&ce->e_lru_list);
- if (ce->e_used || ce->e_queued ||
- atomic_read(&ce->e_refcnt))
- spin_unlock(&mb_cache_spinlock);
- * Prevent any find or get operation on the
- hlist_bl_lock(ce->e_block_hash_p);
- hlist_bl_lock(ce->e_index_hash_p);
- /* Ignore if it is touched by a find/get */
- if (ce->e_used || ce->e_queued ||
- atomic_read(&ce->e_refcnt) ||
- !list_empty(&ce->e_lru_list)) {
- hlist_bl_unlock(ce->e_index_hash_p);
- hlist_bl_unlock(ce->e_block_hash_p);
- l = &mb_cache_lru_list;
- spin_lock(&mb_cache_spinlock);
- mb_assert(list_empty(&ce->e_lru_list));
- mb_assert(!(ce->e_used || ce->e_queued ||
- atomic_read(&ce->e_refcnt)));
- __mb_cache_entry_unhash_unlock(ce);
- spin_unlock(&mb_cache_spinlock);
- ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
- atomic_inc(&cache->c_entry_count);
- INIT_LIST_HEAD(&ce->e_lru_list);
- INIT_HLIST_BL_NODE(&ce->e_block_list);
- INIT_HLIST_BL_NODE(&ce->e_index.o_list);
- ce->e_cache = cache;
- atomic_set(&ce->e_refcnt, 0);
- ce->e_block_hash_p = &cache->c_block_hash[0];
- ce->e_index_hash_p = &cache->c_index_hash[0];
- ce->e_used = 1 + MB_CACHE_WRITER;
- * mb_cache_entry_insert()
- * Inserts an entry that was allocated using mb_cache_entry_alloc() into
- * the cache. After this, the cache entry can be looked up, but is not yet
- * in the lru list as the caller still holds a handle to it. Returns 0 on
- * success, or -EBUSY if a cache entry for that device + inode exists
- * already (this may happen after a failed lookup, but when another process
- * has inserted the same cache entry in the meantime).
- * @bdev: device the cache entry belongs to
- * @block: block number
-mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
- sector_t block, unsigned int key)
- struct mb_cache *cache = ce->e_cache;
- unsigned int bucket;
- struct hlist_bl_node *l;
- struct hlist_bl_head *block_hash_p;
- struct hlist_bl_head *index_hash_p;
- struct mb_cache_entry *lce;
- bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
- cache->c_bucket_bits);
- block_hash_p = &cache->c_block_hash[bucket];
- hlist_bl_lock(block_hash_p);
- hlist_bl_for_each_entry(lce, l, block_hash_p, e_block_list) {
- if (lce->e_bdev == bdev && lce->e_block == block) {
- hlist_bl_unlock(block_hash_p);
- mb_assert(!__mb_cache_entry_is_block_hashed(ce));
- __mb_cache_entry_unhash_block(ce);
- __mb_cache_entry_unhash_index(ce);
- ce->e_block = block;
- ce->e_block_hash_p = block_hash_p;
- ce->e_index.o_key = key;
- hlist_bl_add_head(&ce->e_block_list, block_hash_p);
- hlist_bl_unlock(block_hash_p);
- bucket = hash_long(key, cache->c_bucket_bits);
- index_hash_p = &cache->c_index_hash[bucket];
- hlist_bl_lock(index_hash_p);
- ce->e_index_hash_p = index_hash_p;
- hlist_bl_add_head(&ce->e_index.o_list, index_hash_p);
- hlist_bl_unlock(index_hash_p);
- * mb_cache_entry_release()
- * Release a handle to a cache entry. When the last handle to a cache entry
- * is released it is either freed (if it is invalid) or otherwise inserted
- * in to the lru list.
-mb_cache_entry_release(struct mb_cache_entry *ce)
- __mb_cache_entry_release(ce);
- * mb_cache_entry_free()
-mb_cache_entry_free(struct mb_cache_entry *ce)
- mb_assert(list_empty(&ce->e_lru_list));
- hlist_bl_lock(ce->e_index_hash_p);
- __mb_cache_entry_unhash_index(ce);
- hlist_bl_unlock(ce->e_index_hash_p);
- hlist_bl_lock(ce->e_block_hash_p);
- __mb_cache_entry_unhash_block(ce);
- hlist_bl_unlock(ce->e_block_hash_p);
- __mb_cache_entry_release(ce);
- * mb_cache_entry_get()
- * Get a cache entry by device / block number. (There can only be one entry
- * in the cache per device and block.) Returns NULL if no such cache entry
- * exists. The returned cache entry is locked for exclusive access ("single
-struct mb_cache_entry *
-mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
- unsigned int bucket;
- struct hlist_bl_node *l;
- struct mb_cache_entry *ce;
- struct hlist_bl_head *block_hash_p;
- bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
- cache->c_bucket_bits);
- block_hash_p = &cache->c_block_hash[bucket];
- /* First serialize access to the block corresponding hash chain. */
- hlist_bl_lock(block_hash_p);
- hlist_bl_for_each_entry(ce, l, block_hash_p, e_block_list) {
- mb_assert(ce->e_block_hash_p == block_hash_p);
- if (ce->e_bdev == bdev && ce->e_block == block) {
- * Prevent a free from removing the entry.
- atomic_inc(&ce->e_refcnt);
- hlist_bl_unlock(block_hash_p);
- __spin_lock_mb_cache_entry(ce);
- atomic_dec(&ce->e_refcnt);
- if (ce->e_used > 0) {
- while (ce->e_used > 0) {
- prepare_to_wait(&mb_cache_queue, &wait,
- TASK_UNINTERRUPTIBLE);
- __spin_unlock_mb_cache_entry(ce);
- __spin_lock_mb_cache_entry(ce);
- finish_wait(&mb_cache_queue, &wait);
- ce->e_used += 1 + MB_CACHE_WRITER;
- __spin_unlock_mb_cache_entry(ce);
- if (!list_empty(&ce->e_lru_list)) {
- spin_lock(&mb_cache_spinlock);
- list_del_init(&ce->e_lru_list);
- spin_unlock(&mb_cache_spinlock);
- if (!__mb_cache_entry_is_block_hashed(ce)) {
- __mb_cache_entry_release(ce);
- hlist_bl_unlock(block_hash_p);
-#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
-static struct mb_cache_entry *
-__mb_cache_entry_find(struct hlist_bl_node *l, struct hlist_bl_head *head,
- struct block_device *bdev, unsigned int key)
- /* The index hash chain is alredy acquire by caller. */
- while (l != NULL) {
- struct mb_cache_entry *ce =
- hlist_bl_entry(l, struct mb_cache_entry,
- mb_assert(ce->e_index_hash_p == head);
- if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
- * Prevent a free from removing the entry.
- atomic_inc(&ce->e_refcnt);
- hlist_bl_unlock(head);
- __spin_lock_mb_cache_entry(ce);
- atomic_dec(&ce->e_refcnt);
- /* Incrementing before holding the lock gives readers
- priority over writers. */
- if (ce->e_used >= MB_CACHE_WRITER) {
- while (ce->e_used >= MB_CACHE_WRITER) {
- prepare_to_wait(&mb_cache_queue, &wait,
- TASK_UNINTERRUPTIBLE);
- __spin_unlock_mb_cache_entry(ce);
- __spin_lock_mb_cache_entry(ce);
- finish_wait(&mb_cache_queue, &wait);
- __spin_unlock_mb_cache_entry(ce);
- if (!list_empty(&ce->e_lru_list)) {
- spin_lock(&mb_cache_spinlock);
- list_del_init(&ce->e_lru_list);
- spin_unlock(&mb_cache_spinlock);
- if (!__mb_cache_entry_is_block_hashed(ce)) {
- __mb_cache_entry_release(ce);
- return ERR_PTR(-EAGAIN);
- hlist_bl_unlock(head);
- * mb_cache_entry_find_first()
- * Find the first cache entry on a given device with a certain key in
- * an additional index. Additional matches can be found with
- * mb_cache_entry_find_next(). Returns NULL if no match was found. The
- * returned cache entry is locked for shared access ("multiple readers").
- * @cache: the cache to search
- * @bdev: the device the cache entry should belong to
- * @key: the key in the index
-struct mb_cache_entry *
-mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
- unsigned int bucket = hash_long(key, cache->c_bucket_bits);
- struct hlist_bl_node *l;
- struct mb_cache_entry *ce = NULL;
- struct hlist_bl_head *index_hash_p;
- index_hash_p = &cache->c_index_hash[bucket];
- hlist_bl_lock(index_hash_p);
- if (!hlist_bl_empty(index_hash_p)) {
- l = hlist_bl_first(index_hash_p);
- ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
- hlist_bl_unlock(index_hash_p);
- * mb_cache_entry_find_next()
- * Find the next cache entry on a given device with a certain key in an
- * additional index. Returns NULL if no match could be found. The previous
- * entry is atomatically released, so that mb_cache_entry_find_next() can
- * be called like this:
- * entry = mb_cache_entry_find_first();
- * entry = mb_cache_entry_find_next(entry, ...);
- * @prev: The previous match
- * @bdev: the device the cache entry should belong to
- * @key: the key in the index
-struct mb_cache_entry *
-mb_cache_entry_find_next(struct mb_cache_entry *prev,
- struct block_device *bdev, unsigned int key)
- struct mb_cache *cache = prev->e_cache;
- unsigned int bucket = hash_long(key, cache->c_bucket_bits);
- struct hlist_bl_node *l;
- struct mb_cache_entry *ce;
- struct hlist_bl_head *index_hash_p;
- index_hash_p = &cache->c_index_hash[bucket];
- mb_assert(prev->e_index_hash_p == index_hash_p);
- hlist_bl_lock(index_hash_p);
- mb_assert(!hlist_bl_empty(index_hash_p));
- l = prev->e_index.o_list.next;
- ce = __mb_cache_entry_find(l, index_hash_p, bdev, key);
- __mb_cache_entry_release(prev);
-#endif /* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */
-static int __init init_mbcache(void)
- register_shrinker(&mb_cache_shrinker);
-static void __exit exit_mbcache(void)
- unregister_shrinker(&mb_cache_shrinker);
-module_init(init_mbcache)
-module_exit(exit_mbcache)
diff --git a/include/linux/mbcache.h b/include/linux/mbcache.h
deleted file mode 100644
index 6a392e7a723a..000000000000
--- a/include/linux/mbcache.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- File: linux/mbcache.h
- (C) 2001 by Andreas Gruenbacher, <a.gruenbacher@computer.org>
-struct mb_cache_entry {
- struct list_head e_lru_list;
- struct mb_cache *e_cache;
- unsigned short e_used;
- unsigned short e_queued;
- struct block_device *e_bdev;
- struct hlist_bl_node e_block_list;
- struct hlist_bl_node o_list;
- unsigned int o_key;
- struct hlist_bl_head *e_block_hash_p;
- struct hlist_bl_head *e_index_hash_p;
- struct list_head c_cache_list;
- const char *c_name;
- atomic_t c_entry_count;
- struct kmem_cache *c_entry_cache;
- struct hlist_bl_head *c_block_hash;
- struct hlist_bl_head *c_index_hash;
-/* Functions on caches */
-struct mb_cache *mb_cache_create(const char *, int);
-void mb_cache_shrink(struct block_device *);
-void mb_cache_destroy(struct mb_cache *);
-/* Functions on cache entries */
-struct mb_cache_entry *mb_cache_entry_alloc(struct mb_cache *, gfp_t);
-int mb_cache_entry_insert(struct mb_cache_entry *, struct block_device *,
- sector_t, unsigned int);
-void mb_cache_entry_release(struct mb_cache_entry *);
-void mb_cache_entry_free(struct mb_cache_entry *);
-struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *,
- struct block_device *,
-struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
- struct block_device *,
-struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache_entry *,
- struct block_device *,