/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};
/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	unsigned long eopnotsupp_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));
static struct kmem_cache *_dm_io_cache;
/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}
/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);
int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);
void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}
static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
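/*
 * Worked example (illustrative, not part of the original file): on a
 * 64-bit build DM_IO_MAX_REGIONS == 64, so 'struct io' is aligned to 64
 * bytes and the low 6 bits of its address are always zero.  With
 * io == 0x...7c0 and region == 5:
 *
 *	bi_private = 0x...7c0 | 5   = 0x...7c5
 *	io         = 0x...7c5 & -64 = 0x...7c0   (-64 == ~63, clears low bits)
 *	region     = 0x...7c5 & 63  = 5
 */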
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error) {
		set_bit(region, &io->error_bits);
		if (error == -EOPNOTSUPP)
			set_bit(region, &io->eopnotsupp_bits);
	}

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);

			fn(r, context);
		}
	}
}
static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}
/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}
static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}
static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}
/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}
static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}
static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}
/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}
static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}
static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
static void dm_bio_destructor(struct bio *bio)
{
	unsigned region;
	struct io *io;

	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_free(bio, io->client->bios);
}
/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}
static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}
static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	/*
	 * where->count may be zero if rw holds a write barrier and we
	 * need to send a zero-sized barrier.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_destructor = dm_bio_destructor;
		store_io_and_region_in_bio(bio, io, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}
static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & (1 << BIO_RW_BARRIER)))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
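/*
 * Worked example (illustrative, not part of the original file): for a
 * 2-region write where each region fits in a single bio, io->count
 * evolves as
 *
 *	atomic_set(count, 1)            caller holds the extra reference
 *	do_region() x2  -> count == 3   one reference per submitted bio
 *	dispatch_io()   -> count == 2   extra reference dropped above
 *	endio() x2      -> count == 0   last dec_count() completes the io
 *
 * so the completion callback (or sleeper wakeup) cannot fire while bios
 * are still being submitted.
 */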
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

retry:
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
		rw &= ~(1 << BIO_RW_BARRIER);
		goto retry;
	}

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}
static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}
static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
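/*
 * Illustrative sketch, not part of the original file: issuing a
 * synchronous read of one region into kernel memory via dm_io().
 * The function name example_sync_read() and the 'bdev', 'buf' and
 * region geometry are hypothetical.  Kept under #if 0 so it does not
 * affect the build.
 */
#if 0
static int example_sync_read(struct dm_io_client *client,
			     struct block_device *bdev, void *buf)
{
	unsigned long error_bits;
	struct dm_io_region where = {
		.bdev   = bdev,
		.sector = 0,
		.count  = 8,		/* 4KiB: 8 x 512-byte sectors */
	};
	struct dm_io_request io_req = {
		.bi_rw        = READ,
		.mem.type     = DM_IO_KMEM,
		.mem.ptr.addr = buf,
		.client       = client,
		.notify.fn    = NULL,	/* NULL => sync_io() path */
	};

	return dm_io(&io_req, 1, &where, &error_bits);
}
#endif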
int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}
void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}