/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>

#define DM_MSG_PREFIX "bufio"
/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *	dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_PERCENT	75
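
/*
 * Illustrative numbers only (not taken from this file): on a machine with
 * 8GiB of low memory and the default tunables above, the total bufio cache
 * would be capped at about 2% of that, i.e. roughly 160MiB (or at 25% of
 * vmalloc space, whichever is lower), split evenly among clients; background
 * writeback for a client starts once more than 75% of its share is dirty.
 */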
/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	10

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	60
/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the chunk size is larger, dm-io is used to do the io.
 */
#define DM_BUFIO_INLINE_VECS		16
#define DM_BUFIO_HASH_BITS	20
#define DM_BUFIO_HASH(block) \
	((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
	 ((1 << DM_BUFIO_HASH_BITS) - 1))
/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))
/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to cache_hash with their hash_list field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too.  They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	unsigned char sectors_per_block_bits;
	unsigned char pages_per_block_bits;
	unsigned char blocks_per_page_bits;
	unsigned aux_size;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct hlist_head *cache_hash;
	wait_queue_head_t free_buffer_wait;

	int async_write_error;

	struct list_head client_list;
	struct shrinker shrinker;
};
/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};
struct dm_buffer {
	struct hlist_node hash_list;
	struct list_head lru_list;
	sector_t block;
	void *data;
	enum data_mode data_mode;
	unsigned char list_mode;		/* LIST_* */
	unsigned hold_count;
	int read_error;
	int write_error;
	unsigned long state;
	unsigned long last_accessed;
	struct dm_bufio_client *c;
	struct list_head write_list;
	struct bio bio;
	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
};
/*----------------------------------------------------------------*/

static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
	unsigned ret = c->blocks_per_page_bits - 1;

	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

	return ret;
}
#define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])

#define dm_bufio_in_request()	(!!current->bio_list)
static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}
/*
 * FIXME Move to sched.h?
 */
#ifdef CONFIG_PREEMPT_VOLUNTARY
#  define dm_bufio_cond_resched()		\
do {						\
	if (unlikely(need_resched()))		\
		_cond_resched();		\
} while (0)
#else
#  define dm_bufio_cond_resched()		do { } while (0)
#endif
/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time.  If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;
/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);
/*----------------------------------------------------------------*/

static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	spin_lock(&param_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	spin_unlock(&param_spinlock);
}
/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}

	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
					 (dm_bufio_client_count ? : 1);
}
/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       enum data_mode *data_mode)
{
	unsigned noio_flag;
	void *ptr;

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
	}

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
						c->pages_per_block_bits);
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */

	if (gfp_mask & __GFP_NORETRY)
		noio_flag = memalloc_noio_save();

	ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);

	if (gfp_mask & __GFP_NORETRY)
		memalloc_noio_restore(noio_flag);

	return ptr;
}
/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, enum data_mode data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(DM_BUFIO_CACHE(c), data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data, c->pages_per_block_bits);
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}
/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
				      gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kfree(b);
		return NULL;
	}

	adjust_total_allocated(b->data_mode, (long)c->block_size);

	return b;
}
/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b->data_mode, -(long)c->block_size);

	free_buffer_data(c, b->data, b->data_mode);
	kfree(b);
}
/*
 * Link buffer to the hash list and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	hlist_add_head(&b->hash_list, &c->cache_hash[DM_BUFIO_HASH(block)]);
	b->last_accessed = jiffies;
}
/*
 * Unlink buffer from the hash list and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	hlist_del(&b->hash_list);
	list_del(&b->lru_list);
}
/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}
/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 *	the I/O driver can reject requests spuriously if it thinks that
 *	the requests are too big for the device or if they cross a
 *	controller-defined memory boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/
/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
}
static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
		     bio_end_io_t *end_io)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = block << b->c->sectors_per_block_bits,
		.count = b->c->block_size >> SECTOR_SHIFT,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = b->data;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = b->data;
	}

	b->bio.bi_end_io = end_io;

	r = dm_io(&io_req, 1, &region, NULL);
	if (r)
		end_io(&b->bio, r);
}
static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
			   bio_end_io_t *end_io)
{
	char *ptr;
	int len;

	bio_init(&b->bio);
	b->bio.bi_io_vec = b->bio_vec;
	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
	b->bio.bi_bdev = b->c->bdev;
	b->bio.bi_end_io = end_io;

	/*
	 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
	 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
	 */
	ptr = b->data;
	len = b->c->block_size;

	if (len >= PAGE_SIZE)
		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
	else
		BUG_ON((unsigned long)ptr & (len - 1));

	do {
		if (!bio_add_page(&b->bio, virt_to_page(ptr),
				  len < PAGE_SIZE ? len : PAGE_SIZE,
				  virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
			BUG_ON(b->c->block_size <= PAGE_SIZE);
			use_dmio(b, rw, block, end_io);
			return;
		}

		len -= PAGE_SIZE;
		ptr += PAGE_SIZE;
	} while (len > 0);

	submit_bio(rw, &b->bio);
}
static void submit_io(struct dm_buffer *b, int rw, sector_t block,
		      bio_end_io_t *end_io)
{
	if (rw == WRITE && b->c->write_callback)
		b->c->write_callback(b);

	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
	    b->data_mode != DATA_MODE_VMALLOC)
		use_inline_bio(b, rw, block, end_io);
	else
		use_dmio(b, rw, block, end_io);
}
/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->write_error = error;
	if (unlikely(error)) {
		struct dm_bufio_client *c = b->c;
		(void)cmpxchg(&c->async_write_error, 0, error);
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}
/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	if (!write_list)
		submit_io(b, WRITE, b->block, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}
static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;
	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, WRITE, b->block, write_endio);
		dm_bufio_cond_resched();
	}
	blk_finish_plug(&plug);
}
/*
 * Wait until any activity on the buffer finishes.  Possibly write the
 * buffer if it is dirty.  When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}
/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	return NULL;
}
/*
 * Wait until some other threads free some buffer or release hold count on
 * another buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_task_state(current, TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}
enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in cases all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOIO: don't recurse into the I/O layer
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}
static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}
/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}
static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		dm_bufio_cond_resched();
	}
}
/*
 * Get writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
			       unsigned long *threshold_buffers,
			       unsigned long *limit_buffers)
{
	unsigned long buffers;

	if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
		mutex_lock(&dm_bufio_clients_lock);
		__cache_size_refresh();
		mutex_unlock(&dm_bufio_clients_lock);
	}

	buffers = dm_bufio_cache_size_per_client >>
		  (c->sectors_per_block_bits + SECTOR_SHIFT);

	if (buffers < c->minimum_buffers)
		buffers = c->minimum_buffers;

	*limit_buffers = buffers;
	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
}
/*
 * Check if we're over watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over "limit_buffers", block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	unsigned long threshold_buffers, limit_buffers;

	__get_memory_limit(c, &threshold_buffers, &limit_buffers);

	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
	       limit_buffers) {

		struct dm_buffer *b = __get_unclaimed_buffer(c);

		if (!b)
			return;

		__free_buffer_wake(b);
		dm_bufio_cond_resched();
	}

	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
		__write_dirty_buffers_async(c, 1, write_list);
}
/*
 * Find a buffer in the hash.
 */
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	hlist_for_each_entry(b, &c->cache_hash[DM_BUFIO_HASH(block)],
			     hash_list) {
		dm_bufio_cond_resched();
		if (b->block == block)
			return b;
	}

	return NULL;
}
/*----------------------------------------------------------------
 * Getting buffers and releasing them
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the hash table.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}
/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->read_error = error;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}
/*
 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return NULL;

	if (need_submit)
		submit_io(b, READ, b->block, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = b->read_error;

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
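
/*
 * Illustrative usage sketch (not part of this file; "md_bdev" and "block_nr"
 * are hypothetical caller variables): a dm target caching 4KiB metadata
 * blocks might do roughly
 *
 *	struct dm_bufio_client *c;
 *	struct dm_buffer *buf;
 *	void *data;
 *
 *	c = dm_bufio_client_create(md_bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *
 *	data = dm_bufio_read(c, block_nr, &buf);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	... inspect or modify data ...
 *	dm_bufio_mark_buffer_dirty(buf);
 *	dm_bufio_release(buf);
 *
 *	dm_bufio_write_dirty_buffers(c);
 *	dm_bufio_client_destroy(c);
 */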
void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);

	dm_bufio_lock(c);
	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, READ, b->block, read_endio);
			dm_bufio_release(b);

			dm_bufio_cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in caching
		 * bad buffers.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);
void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state))
		__relink_lru(b, LIST_DIRTY);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		dm_bufio_cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us.  In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the hash queue for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b, NULL);
	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock_io(&b->state, B_WRITING,
				    TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as a block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done in bufio lock, so that block number
		 * change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, WRITE, new_block, write_endio);
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);

	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);
/*
 * Free the given buffer.
 *
 * This is just a hint, if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	dm_bufio_lock(c);

	b = __find(c, block);
	if (b && likely(!b->hold_count) && likely(!b->state)) {
		__unlink_buffer(b);
		__free_buffer_wake(b);
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL(dm_bufio_forget);
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
	c->minimum_buffers = n;
}
EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);
unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	return i_size_read(c->bdev->bd_inode) >>
			   (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
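
/*
 * Illustrative only: a client that passed aux_size = sizeof(struct my_aux)
 * to dm_bufio_client_create() can keep per-buffer state in the area that
 * follows struct dm_buffer, e.g.
 *
 *	struct my_aux *aux = dm_bufio_get_aux_data(buf);
 *
 * "struct my_aux" and "buf" are hypothetical names, not part of this file.
 */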
struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);
static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list)
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}
/*
 * Test if the buffer is unused and too old, and commit it.
 * And if GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to different bufio client.
 */
static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
				unsigned long max_jiffies)
{
	if (jiffies - b->last_accessed < max_jiffies)
		return 0;

	if (!(gfp & __GFP_FS)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return 0;
	}

	if (b->hold_count)
		return 0;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return 1;
}
static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
		   gfp_t gfp_mask)
{
	int l;
	struct dm_buffer *b, *tmp;
	long freed = 0;

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			freed += __cleanup_old_buffer(b, gfp_mask, 0);
			if (!--nr_to_scan)
				break;
			dm_bufio_cond_resched();
		}
	}
	return freed;
}
static unsigned long
dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;
	unsigned long freed;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	if (sc->gfp_mask & __GFP_FS)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return SHRINK_STOP;

	freed  = __scan(c, sc->nr_to_scan, sc->gfp_mask);
	dm_bufio_unlock(c);
	return freed;
}
static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;
	unsigned long count;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	if (sc->gfp_mask & __GFP_FS)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return 0;

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	dm_bufio_unlock(c);
	return count;
}
/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;

	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
	       (block_size & (block_size - 1)));

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->cache_hash = vmalloc(sizeof(struct hlist_head) << DM_BUFIO_HASH_BITS);
	if (!c->cache_hash) {
		r = -ENOMEM;
		goto bad_hash;
	}

	c->bdev = bdev;
	c->block_size = block_size;
	c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
	c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
				  ffs(block_size) - 1 - PAGE_SHIFT : 0;
	c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
				  PAGE_SHIFT - (ffs(block_size) - 1) : 0);

	c->aux_size = aux_size;
	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
		INIT_HLIST_HEAD(&c->cache_hash[i]);

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	mutex_lock(&dm_bufio_clients_lock);
	if (c->blocks_per_page_bits) {
		if (!DM_BUFIO_CACHE_NAME(c)) {
			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
			if (!DM_BUFIO_CACHE_NAME(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}

		if (!DM_BUFIO_CACHE(c)) {
			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
							      c->block_size,
							      c->block_size, 0, NULL);
			if (!DM_BUFIO_CACHE(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}
	}
	mutex_unlock(&dm_bufio_clients_lock);

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad_buffer;
		}
		__free_buffer_wake(b);
	}

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	c->shrinker.count_objects = dm_bufio_shrink_count;
	c->shrinker.scan_objects = dm_bufio_shrink_scan;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	register_shrinker(&c->shrinker);

	return c;

bad_buffer:
bad_cache:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	vfree(c->cache_hash);
bad_hash:
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
		BUG_ON(!hlist_empty(&c->cache_hash[i]));

	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	dm_io_client_destroy(c->dm_io);
	vfree(c->cache_hash);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
static void cleanup_old_buffers(void)
{
	unsigned long max_age = ACCESS_ONCE(dm_bufio_max_age);
	struct dm_bufio_client *c;

	if (max_age > ULONG_MAX / HZ)
		max_age = ULONG_MAX / HZ;

	mutex_lock(&dm_bufio_clients_lock);
	list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
		if (!dm_bufio_trylock(c))
			continue;

		while (!list_empty(&c->lru[LIST_CLEAN])) {
			struct dm_buffer *b;
			b = list_entry(c->lru[LIST_CLEAN].prev,
				       struct dm_buffer, lru_list);
			if (!__cleanup_old_buffer(b, 0, max_age * HZ))
				break;
			dm_bufio_cond_resched();
		}

		dm_bufio_unlock(c);
		dm_bufio_cond_resched();
	}
	mutex_unlock(&dm_bufio_clients_lock);
}
static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}
/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

	mem = (__u64)((totalram_pages - totalhigh_pages) *
		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	/*
	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
	 * in fs/proc/internal.h
	 */
	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}
/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;
	int i;

	cancel_delayed_work_sync(&dm_bufio_work);
	destroy_workqueue(dm_bufio_wq);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
		struct kmem_cache *kc = dm_bufio_caches[i];

		if (kc)
			kmem_cache_destroy(kc);
	}

	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
		kfree(dm_bufio_cache_names[i]);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
			__func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
			__func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	if (bug)
		BUG();
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)
module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");