/*
 * linux/fs/mbcache.c
 * (C) 2001-2002 Andreas Gruenbacher, <a.gruenbacher@computer.org>
 */

/*
 * Filesystem Meta Information Block Cache (mbcache)
 *
 * The mbcache caches blocks of block devices that need to be located
 * by their device/block number, as well as by other criteria (such
 * as the block's contents).
 *
 * There can only be one cache entry in a cache per device and block number.
 * Additional indexes need not be unique in this sense. The number of
 * additional indexes (=other criteria) can be hardwired at compile time
 * or specified at cache create time.
 *
 * Each cache entry is of fixed size. An entry may be `valid' or `invalid'
 * in the cache. A valid entry is in the main hash tables of the cache,
 * and may also be in the lru list. An invalid entry is not in any hashes
 * or lists.
 *
 * A valid cache entry is only in the lru list if no handles refer to it.
 * Invalid cache entries will be freed when the last handle to the cache
 * entry is released. Entries that cannot be freed immediately are put
 * back on the lru list.
 */

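/*
 * Rough sketch of the typical entry life cycle described above
 * (illustrative only; the exact semantics are documented at each
 * function below):
 *
 *	ce = mb_cache_entry_alloc(cache);              (invalid, handle held)
 *	mb_cache_entry_insert(ce, bdev, block, keys);  (now valid and hashed)
 *	mb_cache_entry_release(ce);            (last handle gone -> lru list)
 *	...
 *	ce = mb_cache_entry_get(cache, bdev, block);   (handle re-acquired)
 *	mb_cache_entry_free(ce);          (unhash and release -> entry freed)
 */
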
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/hash.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/mbcache.h>


#ifdef MB_CACHE_DEBUG
# define mb_debug(f...) do { \
		printk(KERN_DEBUG f); \
		printk("\n"); \
	} while (0)
# define mb_assert(c) do { if (!(c)) \
		printk(KERN_ERR "assertion " #c " failed\n"); \
	} while(0)
#else
# define mb_debug(f...) do { } while(0)
# define mb_assert(c) do { } while(0)
#endif
#define mb_error(f...) do { \
		printk(KERN_ERR f); \
		printk("\n"); \
	} while(0)

MODULE_AUTHOR("Andreas Gruenbacher <a.gruenbacher@computer.org>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");

EXPORT_SYMBOL(mb_cache_create);
EXPORT_SYMBOL(mb_cache_shrink);
EXPORT_SYMBOL(mb_cache_destroy);
EXPORT_SYMBOL(mb_cache_entry_alloc);
EXPORT_SYMBOL(mb_cache_entry_insert);
EXPORT_SYMBOL(mb_cache_entry_release);
EXPORT_SYMBOL(mb_cache_entry_takeout);
EXPORT_SYMBOL(mb_cache_entry_free);
EXPORT_SYMBOL(mb_cache_entry_dup);
EXPORT_SYMBOL(mb_cache_entry_get);
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)
EXPORT_SYMBOL(mb_cache_entry_find_first);
EXPORT_SYMBOL(mb_cache_entry_find_next);
#endif

/*
 * Global data: list of all mbcache's, lru list, and a spinlock for
 * accessing cache data structures on SMP machines. The lru list is
 * global across all mbcaches.
 */

static LIST_HEAD(mb_cache_list);
static LIST_HEAD(mb_cache_lru_list);
static spinlock_t mb_cache_spinlock = SPIN_LOCK_UNLOCKED;
static struct shrinker *mb_shrinker;

static inline int
mb_cache_indexes(struct mb_cache *cache)
{
#ifdef MB_CACHE_INDEXES_COUNT
	return MB_CACHE_INDEXES_COUNT;
#else
	return cache->c_indexes_count;
#endif
}


/*
 * What the mbcache registers as to get shrunk dynamically.
 */
static int mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask);

static inline int
__mb_cache_entry_is_hashed(struct mb_cache_entry *ce)
{
	return !list_empty(&ce->e_block_list);
}


static inline void
__mb_cache_entry_unhash(struct mb_cache_entry *ce)
{
	int n;

	if (__mb_cache_entry_is_hashed(ce)) {
		list_del_init(&ce->e_block_list);
		for (n=0; n<mb_cache_indexes(ce->e_cache); n++)
			list_del(&ce->e_indexes[n].o_list);
	}
}

static inline void
__mb_cache_entry_forget(struct mb_cache_entry *ce, int gfp_mask)
{
	struct mb_cache *cache = ce->e_cache;

	mb_assert(atomic_read(&ce->e_used) == 0);
	if (cache->c_op.free && cache->c_op.free(ce, gfp_mask)) {
		/* free failed -- put back on the lru list
		   for freeing later. */
		spin_lock(&mb_cache_spinlock);
		list_add(&ce->e_lru_list, &mb_cache_lru_list);
		spin_unlock(&mb_cache_spinlock);
	} else {
		kmem_cache_free(cache->c_entry_cache, ce);
		atomic_dec(&cache->c_entry_count);
	}
}

static inline void
__mb_cache_entry_release_unlock(struct mb_cache_entry *ce)
{
	if (atomic_dec_and_test(&ce->e_used)) {
		if (!__mb_cache_entry_is_hashed(ce))
			goto forget;
		list_add_tail(&ce->e_lru_list, &mb_cache_lru_list);
	}
	spin_unlock(&mb_cache_spinlock);
	return;
forget:
	spin_unlock(&mb_cache_spinlock);
	__mb_cache_entry_forget(ce, GFP_KERNEL);
}

/*
 * mb_cache_shrink_fn()  memory pressure callback
 *
 * This function is called by the kernel memory management when memory
 * gets low.
 *
 * @nr_to_scan: Number of objects to scan
 * @gfp_mask: (ignored)
 *
 * Returns the number of objects which are present in the cache.
 */
static int
mb_cache_shrink_fn(int nr_to_scan, unsigned int gfp_mask)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;
	int count = 0;

	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &mb_cache_list) {
		struct mb_cache *cache =
			list_entry(l, struct mb_cache, c_cache_list);
		mb_debug("cache %s (%d)", cache->c_name,
			 atomic_read(&cache->c_entry_count));
		count += atomic_read(&cache->c_entry_count);
	}
	mb_debug("trying to free %d entries", nr_to_scan);
	if (nr_to_scan == 0) {
		spin_unlock(&mb_cache_spinlock);
		goto out;
	}
	while (nr_to_scan-- && !list_empty(&mb_cache_lru_list)) {
		struct mb_cache_entry *ce =
			list_entry(mb_cache_lru_list.next,
				   struct mb_cache_entry, e_lru_list);
		list_move_tail(&ce->e_lru_list, &free_list);
		__mb_cache_entry_unhash(ce);
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), gfp_mask);
	}
out:
	return count;
}

/*
 * mb_cache_create()  create a new cache
 *
 * All entries in one cache are equal size. Cache entries may be from
 * multiple devices. If this is the first mbcache created, registers
 * the cache with kernel memory management. Returns NULL if no more
 * memory was available.
 *
 * @name: name of the cache (informal)
 * @cache_op: contains the callback called when freeing a cache entry
 * @entry_size: The size of a cache entry, including
 *              struct mb_cache_entry
 * @indexes_count: number of additional indexes in the cache. Must equal
 *                 MB_CACHE_INDEXES_COUNT if the number of indexes is
 *                 hardwired.
 * @bucket_bits: log2(number of hash buckets)
 */
struct mb_cache *
mb_cache_create(const char *name, struct mb_cache_op *cache_op,
		size_t entry_size, int indexes_count, int bucket_bits)
{
	int m=0, n, bucket_count = 1 << bucket_bits;
	struct mb_cache *cache = NULL;

	if(entry_size < sizeof(struct mb_cache_entry) +
			indexes_count * sizeof(struct mb_cache_entry_index))
		return NULL;

	cache = kmalloc(sizeof(struct mb_cache) +
			indexes_count * sizeof(struct list_head), GFP_KERNEL);
	if (!cache)
		goto fail;
	cache->c_name = name;
	cache->c_op.free = NULL;
	if (cache_op)
		cache->c_op.free = cache_op->free;
	atomic_set(&cache->c_entry_count, 0);
	cache->c_bucket_bits = bucket_bits;
#ifdef MB_CACHE_INDEXES_COUNT
	mb_assert(indexes_count == MB_CACHE_INDEXES_COUNT);
#else
	cache->c_indexes_count = indexes_count;
#endif
	cache->c_block_hash = kmalloc(bucket_count * sizeof(struct list_head),
				      GFP_KERNEL);
	if (!cache->c_block_hash)
		goto fail;
	for (n=0; n<bucket_count; n++)
		INIT_LIST_HEAD(&cache->c_block_hash[n]);
	for (m=0; m<indexes_count; m++) {
		cache->c_indexes_hash[m] = kmalloc(bucket_count *
						   sizeof(struct list_head),
						   GFP_KERNEL);
		if (!cache->c_indexes_hash[m])
			goto fail;
		for (n=0; n<bucket_count; n++)
			INIT_LIST_HEAD(&cache->c_indexes_hash[m][n]);
	}
	cache->c_entry_cache = kmem_cache_create(name, entry_size, 0,
		SLAB_RECLAIM_ACCOUNT, NULL, NULL);
	if (!cache->c_entry_cache)
		goto fail;

	spin_lock(&mb_cache_spinlock);
	list_add(&cache->c_cache_list, &mb_cache_list);
	spin_unlock(&mb_cache_spinlock);
	return cache;

fail:
	if (cache) {
		while (--m >= 0)
			kfree(cache->c_indexes_hash[m]);
		if (cache->c_block_hash)
			kfree(cache->c_block_hash);
		kfree(cache);
	}
	return NULL;
}

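/*
 * Usage sketch for mb_cache_create() (hypothetical caller; the name
 * "my_cache", the single extra index and the 6 bucket bits are just
 * illustrative values, and the NULL cache_op means no free callback):
 *
 *	cache = mb_cache_create("my_cache", NULL,
 *				sizeof(struct mb_cache_entry) +
 *				sizeof(struct mb_cache_entry_index),
 *				1, 6);
 *	if (!cache)
 *		return -ENOMEM;
 *	...
 *	mb_cache_destroy(cache);	(when the cache is no longer needed)
 */
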
/*
 * mb_cache_shrink()
 *
 * Removes all cache entries of a device from the cache. All cache entries
 * currently in use cannot be freed, and thus remain in the cache. All others
 * are freed.
 *
 * @cache: which cache to shrink
 * @bdev: which device's cache entries to shrink
 */
void
mb_cache_shrink(struct mb_cache *cache, struct block_device *bdev)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_bdev == bdev) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	spin_unlock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}
}

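/*
 * Usage sketch for mb_cache_shrink() (hypothetical caller, e.g. a
 * filesystem dropping all unused entries of one device when its super
 * block goes away; "sb" is assumed to be a struct super_block *):
 *
 *	mb_cache_shrink(cache, sb->s_bdev);
 */
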
/*
 * mb_cache_destroy()
 *
 * Shrinks the cache to its minimum possible size (hopefully 0 entries),
 * and then destroys it. If this was the last mbcache, un-registers the
 * mbcache from kernel memory management.
 */
void
mb_cache_destroy(struct mb_cache *cache)
{
	LIST_HEAD(free_list);
	struct list_head *l, *ltmp;
	int n;

	spin_lock(&mb_cache_spinlock);
	list_for_each_safe(l, ltmp, &mb_cache_lru_list) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_lru_list);
		if (ce->e_cache == cache) {
			list_move_tail(&ce->e_lru_list, &free_list);
			__mb_cache_entry_unhash(ce);
		}
	}
	list_del(&cache->c_cache_list);
	spin_unlock(&mb_cache_spinlock);

	list_for_each_safe(l, ltmp, &free_list) {
		__mb_cache_entry_forget(list_entry(l, struct mb_cache_entry,
						   e_lru_list), GFP_KERNEL);
	}

	if (atomic_read(&cache->c_entry_count) > 0) {
		mb_error("cache %s: %d orphaned entries",
			 cache->c_name,
			 atomic_read(&cache->c_entry_count));
	}

	kmem_cache_destroy(cache->c_entry_cache);

	for (n=0; n < mb_cache_indexes(cache); n++)
		kfree(cache->c_indexes_hash[n]);
	kfree(cache->c_block_hash);
	kfree(cache);
}

/*
 * mb_cache_entry_alloc()
 *
 * Allocates a new cache entry. The new entry will not be valid initially,
 * and thus cannot be looked up yet. It should be filled with data, and
 * then inserted into the cache using mb_cache_entry_insert(). Returns NULL
 * if no more memory was available.
 */
struct mb_cache_entry *
mb_cache_entry_alloc(struct mb_cache *cache)
{
	struct mb_cache_entry *ce;

	atomic_inc(&cache->c_entry_count);
	ce = kmem_cache_alloc(cache->c_entry_cache, GFP_KERNEL);
	if (ce) {
		INIT_LIST_HEAD(&ce->e_lru_list);
		INIT_LIST_HEAD(&ce->e_block_list);
		ce->e_cache = cache;
		atomic_set(&ce->e_used, 1);
	}
	return ce;
}

/*
 * mb_cache_entry_insert()
 *
 * Inserts an entry that was allocated using mb_cache_entry_alloc() into
 * the cache. After this, the cache entry can be looked up, but is not yet
 * in the lru list as the caller still holds a handle to it. Returns 0 on
 * success, or -EBUSY if a cache entry for that device + block exists
 * already (this may happen after a failed lookup, but when another process
 * has inserted the same cache entry in the meantime).
 *
 * @bdev: device the cache entry belongs to
 * @block: block number
 * @keys: array of additional keys. There must be indexes_count entries
 *        in the array (as specified when creating the cache).
 */
int
mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev,
		      sector_t block, unsigned int keys[])
{
	struct mb_cache *cache = ce->e_cache;
	unsigned int bucket;
	struct list_head *l;
	int error = -EBUSY, n;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each_prev(l, &cache->c_block_hash[bucket]) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block)
			goto out;
	}
	__mb_cache_entry_unhash(ce);
	ce->e_bdev = bdev;
	ce->e_block = block;
	list_add(&ce->e_block_list, &cache->c_block_hash[bucket]);
	for (n=0; n<mb_cache_indexes(cache); n++) {
		ce->e_indexes[n].o_key = keys[n];
		bucket = hash_long(keys[n], cache->c_bucket_bits);
		list_add(&ce->e_indexes[n].o_list,
			 &cache->c_indexes_hash[n][bucket]);
	}
	error = 0;
out:
	spin_unlock(&mb_cache_spinlock);
	return error;
}

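/*
 * Usage sketch for allocating and inserting an entry (hypothetical caller;
 * "key" is a single additional index key for a cache created with
 * indexes_count == 1):
 *
 *	ce = mb_cache_entry_alloc(cache);
 *	if (ce) {
 *		unsigned int keys[1] = { key };
 *
 *		error = mb_cache_entry_insert(ce, bdev, block, keys);
 *		(-EBUSY means another process inserted the same device/block
 *		 first; the handle must still be released either way)
 *		mb_cache_entry_release(ce);
 *	}
 */
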
/*
 * mb_cache_entry_release()
 *
 * Release a handle to a cache entry. When the last handle to a cache entry
 * is released it is either freed (if it is invalid) or otherwise inserted
 * into the lru list.
 */
void
mb_cache_entry_release(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	__mb_cache_entry_release_unlock(ce);
}


/*
 * mb_cache_entry_takeout()
 *
 * Take a cache entry out of the cache, making it invalid. The entry can later
 * be re-inserted using mb_cache_entry_insert(), or released using
 * mb_cache_entry_release().
 */
void
mb_cache_entry_takeout(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_unhash(ce);
	spin_unlock(&mb_cache_spinlock);
}


/*
 * mb_cache_entry_free()
 *
 * This is equivalent to the sequence mb_cache_entry_takeout() --
 * mb_cache_entry_release().
 */
void
mb_cache_entry_free(struct mb_cache_entry *ce)
{
	spin_lock(&mb_cache_spinlock);
	mb_assert(list_empty(&ce->e_lru_list));
	__mb_cache_entry_unhash(ce);
	__mb_cache_entry_release_unlock(ce);
}

/*
 * mb_cache_entry_dup()
 *
 * Duplicate a handle to a cache entry (does not duplicate the cache entry
 * itself). After the call, both the old and the new handle must be released.
 */
struct mb_cache_entry *
mb_cache_entry_dup(struct mb_cache_entry *ce)
{
	atomic_inc(&ce->e_used);
	return ce;
}

/*
 * mb_cache_entry_get()
 *
 * Get a cache entry by device / block number. (There can only be one entry
 * in the cache per device and block.) Returns NULL if no such cache entry
 * exists.
 */
struct mb_cache_entry *
mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev,
		   sector_t block)
{
	unsigned int bucket;
	struct list_head *l;
	struct mb_cache_entry *ce;

	bucket = hash_long((unsigned long)bdev + (block & 0xffffffff),
			   cache->c_bucket_bits);
	spin_lock(&mb_cache_spinlock);
	list_for_each(l, &cache->c_block_hash[bucket]) {
		ce = list_entry(l, struct mb_cache_entry, e_block_list);
		if (ce->e_bdev == bdev && ce->e_block == block) {
			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);
			atomic_inc(&ce->e_used);
			goto cleanup;
		}
	}
	ce = NULL;

cleanup:
	spin_unlock(&mb_cache_spinlock);
	return ce;
}

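/*
 * Usage sketch for mb_cache_entry_get() (hypothetical caller):
 *
 *	ce = mb_cache_entry_get(cache, bdev, block);
 *	if (ce) {
 *		... the entry cannot go away while the handle is held ...
 *		mb_cache_entry_release(ce);	(or mb_cache_entry_free()
 *						 to drop it from the cache)
 *	}
 */
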
#if !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0)

static struct mb_cache_entry *
__mb_cache_entry_find(struct list_head *l, struct list_head *head,
		      int index, struct block_device *bdev, unsigned int key)
{
	while (l != head) {
		struct mb_cache_entry *ce =
			list_entry(l, struct mb_cache_entry,
				   e_indexes[index].o_list);
		if (ce->e_bdev == bdev && ce->e_indexes[index].o_key == key) {
			if (!list_empty(&ce->e_lru_list))
				list_del_init(&ce->e_lru_list);
			atomic_inc(&ce->e_used);
			return ce;
		}
		l = l->next;
	}
	return NULL;
}

/*
 * mb_cache_entry_find_first()
 *
 * Find the first cache entry on a given device with a certain key in
 * an additional index. Additional matches can be found with
 * mb_cache_entry_find_next(). Returns NULL if no match was found.
 *
 * @cache: the cache to search
 * @index: the number of the additional index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_first(struct mb_cache *cache, int index,
			  struct block_device *bdev, unsigned int key)
{
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	mb_assert(index < mb_cache_indexes(cache));
	spin_lock(&mb_cache_spinlock);
	l = cache->c_indexes_hash[index][bucket].next;
	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
				   index, bdev, key);
	spin_unlock(&mb_cache_spinlock);
	return ce;
}

/*
 * mb_cache_entry_find_next()
 *
 * Find the next cache entry on a given device with a certain key in an
 * additional index. Returns NULL if no match could be found. The previous
 * entry is automatically released, so that mb_cache_entry_find_next() can
 * be called like this:
 *
 * entry = mb_cache_entry_find_first();
 * while (entry) {
 *	...
 *	entry = mb_cache_entry_find_next(entry, ...);
 * }
 *
 * @prev: The previous match
 * @index: the number of the additional index to search (0<=index<indexes_count)
 * @bdev: the device the cache entry should belong to
 * @key: the key in the index
 */
struct mb_cache_entry *
mb_cache_entry_find_next(struct mb_cache_entry *prev, int index,
			 struct block_device *bdev, unsigned int key)
{
	struct mb_cache *cache = prev->e_cache;
	unsigned int bucket = hash_long(key, cache->c_bucket_bits);
	struct list_head *l;
	struct mb_cache_entry *ce;

	mb_assert(index < mb_cache_indexes(cache));
	spin_lock(&mb_cache_spinlock);
	l = prev->e_indexes[index].o_list.next;
	ce = __mb_cache_entry_find(l, &cache->c_indexes_hash[index][bucket],
				   index, bdev, key);
	__mb_cache_entry_release_unlock(prev);
	return ce;
}

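/*
 * Fuller variant of the loop shown above (hedged sketch; "matches()" is a
 * hypothetical predicate, and whichever handle is still held when leaving
 * the loop must be released):
 *
 *	ce = mb_cache_entry_find_first(cache, 0, bdev, key);
 *	while (ce) {
 *		if (matches(ce))
 *			break;
 *		ce = mb_cache_entry_find_next(ce, 0, bdev, key);
 *	}
 *	if (ce)
 *		mb_cache_entry_release(ce);
 */
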
#endif	/* !defined(MB_CACHE_INDEXES_COUNT) || (MB_CACHE_INDEXES_COUNT > 0) */

static int __init init_mbcache(void)
{
	mb_shrinker = set_shrinker(DEFAULT_SEEKS, mb_cache_shrink_fn);
	return 0;
}

static void __exit exit_mbcache(void)
{
	remove_shrinker(mb_shrinker);
}

module_init(init_mbcache)
module_exit(exit_mbcache)