/*
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */
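
/*
 * Entries are reached through handles: mb_cache_entry_alloc(),
 * mb_cache_entry_get(), mb_cache_entry_find_first() and
 * mb_cache_entry_find_next() return an entry with a handle held, and
 * mb_cache_entry_release() or mb_cache_entry_free() drop that handle
 * again. A rough sketch of the cache lifecycle itself (the cache name
 * and bucket_bits value are illustrative placeholders, not taken from
 * any particular filesystem):
 *
 *	cache = mb_cache_create("example_xattr", 6);	// e.g. on module init
 *	...						// alloc/insert/find entries
 *	mb_cache_destroy(cache);			// e.g. on module exit
 */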

#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>

#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
#define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while(0)
#else
# define mb_debug(f...) do { } while(0)
# define mb_assert(c) do { } while(0)
#endif
#define mb_error(f...) do { \
		printk(KERN_ERR f); \
		printk("\n"); \
	} while(0)

#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)

static DECLARE_WAIT_QUEUE_HEAD(mb_cache_queue);

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif

struct mb_cache {
	struct list_head	c_cache_list;
	const char		*c_name;
	atomic_t		c_entry_count;
	int			c_max_entries;
	int			c_bucket_bits;
	struct kmem_cache	*c_entry_cache;
	struct list_head	*c_block_hash;
	struct list_head	*c_index_hash;
};


/*
 * Global data: list of all mbcache's, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static DEFINE_SPINLOCK(mb_cache_spinlock);

/*
 * What the mbcache registers as to get shrunk dynamically.
 */

static int mb_cache_shrink_fn(struct shrinker *shrink,
			      int nr_to_scan, gfp_t gfp_mask);

static struct shrinker mb_cache_shrinker = {
	.shrink = mb_cache_shrink_fn,
	.seeks = DEFAULT_SEEKS,
};

static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
	return !list_empty(&ce->e_block_list);
}


static void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
	if (__mb_cache_entry_is_hashed(ce)) {
		list_del_init(&ce->e_block_list);
		list_del(&ce->e_index.o_list);
	}
}


static void
__mb_cache_entry_forget(struct mb_cache_entry *ce, gfp_t gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(!(ce->e_used || ce->e_queued));
	kmem_cache_free(cache->c_entry_cache, ce);
	atomic_dec(&cache->c_entry_count);
}


static void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
	__releases(mb_cache_spinlock)
{
	/* Wake up all processes queuing for this cache entry. */
	if (ce->e_queued)
		wake_up_all(&mb_cache_queue);
	if (ce->e_used >= MB_CACHE_WRITER)
		ce->e_used -= MB_CACHE_WRITER;
	ce->e_used--;
	if (!(ce->e_used || ce->e_queued)) {
		if (!__mb_cache_entry_is_hashed(ce))
			goto forget;
		mb_assert(list_empty(&ce->e_lru_list));
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
	}
	spin_unlock(&mb_cache_spinlock);
	return;
forget:
	spin_unlock(&mb_cache_spinlock);
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}


/*
 * mb_cache_shrink_fn()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @shrink: (ignored)
 * @nr_to_scan: Number of objects to scan
 * @gfp_mask: (ignored)
 *
 * Returns the number of objects which are present in the cache
 * (scaled by sysctl_vfs_cache_pressure).
 */
static int
mb_cache_shrink_fn(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
{
	LIST_HEAD(free_list);
	struct mb_cache *cache;
	struct mb_cache_entry *entry, *tmp;
	int count = 0;

	mb_debug("trying to free %d entries", nr_to_scan);
	spin_lock(&mb_cache_spinlock);
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
	}
	list_for_each_entry(cache, &mb_cache_list, c_cache_list) {
		mb_debug("cache %s (%d)", cache->c_name,
			 atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_entry_safe(entry, tmp, &free_list, e_lru_list) {
		__mb_cache_entry_forget(entry, gfp_mask);
	}
	return (count / 100) * sysctl_vfs_cache_pressure;
}


/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are equal size. Cache entries may be from
 * multiple devices. If this is the first mbcache created, registers
 * the cache with kernel memory management. Returns NULL if no more
 * memory was available.
 *
 * @name: name of the cache (informal)
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, int bucket_bits)
{
	int n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	cache = kmalloc(sizeof(struct mb_cache), GFP_KERNEL);
	if (!cache)
		return NULL;
	cache->c_name = name;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	cache->c_index_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_index_hash)
		goto fail;
	for (n = 0; n < bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_index_hash[n]);
	cache->c_entry_cache = kmem_cache_create(name,
		sizeof(struct mb_cache_entry), 0,
		SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
	if (!cache->c_entry_cache)
		goto fail2;

	/*
	 * Set an upper limit on the number of cache entries so that the hash
	 * chains won't grow too long.
	 */
	cache->c_max_entries = bucket_count << 4;

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail2:
	kfree(cache->c_index_hash);

fail:
	kfree(cache->c_block_hash);
	kfree(cache);
	return NULL;
}


/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. All cache entries
 * currently in use cannot be freed, and thus remain in the cache. All others
 * are freed.
 *
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}
}
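
/*
 * Illustrative note (not taken from any specific caller): a filesystem
 * that has been putting entries for a device into an mbcache would
 * typically call mb_cache_shrink() when it stops using that device, e.g.
 * from its put_super path, so that no stale entries for the device
 * linger in the cache:
 *
 *	mb_cache_shrink(sb->s_bdev);
 */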


/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it. If this was the last mbcache, un-registers the
 * mbcache from kernel memory management.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			 cache->c_name,
			 atomic_read(&cache->c_entry_count));
	}

	kmem_cache_destroy(cache->c_entry_cache);

	kfree(cache->c_index_hash);
	kfree(cache->c_block_hash);
	kfree(cache);
}


/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache, gfp_t gfp_flags)
{
	struct mb_cache_entry *ce = NULL;

	if (atomic_read(&cache->c_entry_count) >= cache->c_max_entries) {
		spin_lock(&mb_cache_spinlock);
		if (!list_empty(&mb_cache_lru_list)) {
			ce = list_entry(mb_cache_lru_list.next,
					struct mb_cache_entry, e_lru_list);
			list_del_init(&ce->e_lru_list);
			__mb_cache_entry_unhash(ce);
		}
		spin_unlock(&mb_cache_spinlock);
	}
	if (!ce) {
		ce = kmem_cache_alloc(cache->c_entry_cache, gfp_flags);
		if (!ce)
			return NULL;
		atomic_inc(&cache->c_entry_count);
		INIT_LIST_HEAD(&ce->e_lru_list);
		INIT_LIST_HEAD(&ce->e_block_list);
		ce->e_cache = cache;
		ce->e_queued = 0;
	}
	ce->e_used = 1 + MB_CACHE_WRITER;
	return ce;
}


/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block exists
 * already (this may happen after a failed lookup, but when another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @key: lookup key
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int key)
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct list_head *l;
	int error = -EBUSY;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block)
			goto out;
	}
	__mb_cache_entry_unhash(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	ce->e_index.o_key = key;
	bucket = hash_long(key, cache->c_bucket_bits);
	list_add(&ce->e_index.o_list, &cache->c_index_hash[bucket]);
	error = 0;
out:
	spin_unlock(&mb_cache_spinlock);
	return error;
}
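
/*
 * A minimal usage sketch (illustrative only; "bdev", "block" and "key"
 * are placeholders for values the caller already has, e.g. from a buffer
 * head). -EBUSY simply means another process won the race to insert an
 * entry for the same device and block, so the local entry is dropped:
 *
 *	ce = mb_cache_entry_alloc(cache, GFP_NOFS);
 *	if (ce) {
 *		error = mb_cache_entry_insert(ce, bdev, block, key);
 *		if (error)
 *			mb_cache_entry_free(ce);
 *		else
 *			mb_cache_entry_release(ce);
 *	}
 */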


/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_free()
 *
 * Invalidate a cache entry and release the handle to it: the entry is
 * unhashed so that it can no longer be looked up, and is freed once the
 * last handle to it has been released.
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_unhash(ce);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists. The returned cache entry is locked for exclusive access ("single
 * writer").
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct list_head *l;
	struct mb_cache_entry *ce;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &cache->c_block_hash[bucket]) {
		ce = list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			while (ce->e_used > 0) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);
			ce->e_used += 1 + MB_CACHE_WRITER;

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				return NULL;
			}
			goto cleanup;
		}
	}
	ce = NULL;

cleanup:
	spin_unlock(&mb_cache_spinlock);
	return ce;
}
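
/*
 * A minimal usage sketch (illustrative only; "bdev" and "block" are
 * placeholders): when a caller stops using a block, it can look up the
 * corresponding entry exclusively and invalidate it, so that later
 * lookups no longer return it:
 *
 *	ce = mb_cache_entry_get(cache, bdev, block);
 *	if (ce)
 *		mb_cache_entry_free(ce);
 */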

#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
		      struct block_device *bdev, unsigned int key)
{
	while (l != head) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_index.o_list);
		if (ce->e_bdev == bdev && ce->e_index.o_key == key) {
			DEFINE_WAIT(wait);

			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);

			/* Incrementing before holding the lock gives readers
			   priority over writers. */
			ce->e_used++;
			while (ce->e_used >= MB_CACHE_WRITER) {
				ce->e_queued++;
				prepare_to_wait(&mb_cache_queue, &wait,
						TASK_UNINTERRUPTIBLE);
				spin_unlock(&mb_cache_spinlock);
				schedule();
				spin_lock(&mb_cache_spinlock);
				ce->e_queued--;
			}
			finish_wait(&mb_cache_queue, &wait);

			if (!__mb_cache_entry_is_hashed(ce)) {
				__mb_cache_entry_release_unlock(ce);
				spin_lock(&mb_cache_spinlock);
				return ERR_PTR(-EAGAIN);
			}
			return ce;
		}
		l = l->next;
	}
	return NULL;
}


/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found. The
 * returned cache entry is locked for shared access ("multiple readers").
 *
 * @cache: the cache to search
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, struct block_device *bdev,
			  unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	l = cache->c_index_hash[bucket].next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	spin_unlock(&mb_cache_spinlock);
	return ce;
}


/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 * 	...
 * 	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	spin_lock(&mb_cache_spinlock);
	l = prev->e_index.o_list.next;
	ce = __mb_cache_entry_find(l, &cache->c_index_hash[bucket], bdev, key);
	__mb_cache_entry_release_unlock(prev);
	return ce;
}
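
/*
 * A fuller reader-side sketch (illustrative only; check_match() is a
 * placeholder for whatever test the caller applies to a candidate entry).
 * Because __mb_cache_entry_find() can return ERR_PTR(-EAGAIN) when a
 * matching entry was invalidated while we waited for it, callers should
 * tolerate IS_ERR() results from the find functions; on -EAGAIN they may
 * choose to restart the search (not shown here):
 *
 *	ce = mb_cache_entry_find_first(cache, bdev, key);
 *	while (ce && !IS_ERR(ce)) {
 *		if (check_match(ce)) {
 *			... use the entry ...
 *			mb_cache_entry_release(ce);
 *			break;
 *		}
 *		ce = mb_cache_entry_find_next(ce, bdev, key);
 *	}
 */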

#endif	/* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

static int __init init_mbcache(void)
{
	register_shrinker(&mb_cache_shrinker);
	return 0;
}

static void __exit exit_mbcache(void)
{
	unregister_shrinker(&mb_cache_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)