/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-io.h"

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
static struct bio_set *_bios;

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/* FIXME: can we shrink this ? */
struct io {
	unsigned long error;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
};
/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */
static unsigned _num_ios;
static mempool_t *_io_pool;
/*
 * Temporary functions to allow old and new interfaces to co-exist.
 */
static struct bio_set *bios(struct dm_io_client *client)
{
	return client ? client->bios : _bios;
}

static mempool_t *io_pool(struct dm_io_client *client)
{
	return client ? client->pool : _io_pool;
}

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}
static int resize_pool(unsigned int new_ios)
{
	int r = 0;

	if (_io_pool) {
		if (new_ios == 0) {
			/* free off the pool */
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			bioset_free(_bios);

		} else {
			/* resize the pool */
			r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
		}

	} else {
		/* create new pool */
		_io_pool = mempool_create_kmalloc_pool(new_ios,
						       sizeof(struct io));
		if (!_io_pool)
			return -ENOMEM;

		_bios = bioset_create(16, 16);
		if (!_bios) {
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			return -ENOMEM;
		}
	}

	if (!r)
		_num_ios = new_ios;

	return r;
}
int dm_io_get(unsigned int num_pages)
{
	return resize_pool(_num_ios + pages_to_ios(num_pages));
}

void dm_io_put(unsigned int num_pages)
{
	resize_pool(_num_ios - pages_to_ios(num_pages));
}
/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 16);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);
int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);
void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
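/*
 * Usage sketch for the new per-client interface (illustrative; the page
 * count is a made-up example):
 *
 *	struct dm_io_client *client = dm_io_client_create(16);
 *
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	... set dm_io_request.client = client and call dm_io() ...
 *	dm_io_client_destroy(client);
 */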
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
	bio->bi_io_vec[bio->bi_max_vecs].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
	return bio->bi_io_vec[bio->bi_max_vecs].bv_len;
}
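/*
 * Illustrative note: this works because do_region() below allocates one
 * spare bvec and then decrements bi_max_vecs, so bio_add_page() never
 * touches the slot at index bi_max_vecs:
 *
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_pages + 1, bs);
 *	bio->bi_max_vecs--;		// hide the spare bvec
 *	bio_set_region(bio, region);	// stash region in the hidden slot
 */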
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			int r = io->error;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io_pool(io->client));
			fn(r, context);
		}
	}
}
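/*
 * Worked example: with three regions, a failure in region 1 alone leaves
 * io->error == 0x2, so a caller handed these bits back via *error_bits
 * can use test_bit(1, &error_bits) to identify the failed region.
 */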
static int endio(struct bio *bio, unsigned int done, int error)
{
	struct io *io;
	unsigned region;

	/* keep going until we've finished */
	if (bio->bi_size)
		return 1;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	io = bio->bi_private;
	region = bio_get_region(bio);

	bio->bi_max_vecs++;
	bio_put(bio);

	dec_count(io, region, error);

	return 0;
}
/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
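/*
 * Illustrative consumer loop (this mirrors the inner loop of do_region()
 * further down):
 *
 *	struct page *page;
 *	unsigned long len;
 *	unsigned offset;
 *
 *	dp->get_page(dp, &page, &len, &offset);	// current page span
 *	... consume up to len bytes at offset within page ...
 *	dp->next_page(dp);			// advance to the next page
 */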
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}
static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}
static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}
/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}
static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}
static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}
/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}
static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}
static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
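/*
 * Worked example (hypothetical vmalloc address): for data = 0xf8803abc,
 * context_u = 0xabc, so the first vm_get_page() returns offset 0xabc and
 * len = PAGE_SIZE - 0xabc; vm_next_page() then advances context_ptr to
 * the next page boundary and zeroes context_u.
 */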
static void dm_bio_destructor(struct bio *bio)
{
	struct io *io = bio->bi_private;

	bio_free(bio, bios(io->client));
}
/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}
static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}
static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned int region, struct io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably sized bio: we add an extra
		 * bvec for bio_get/set_region() and decrement bi_max_vecs
		 * to hide it from bio_add_page().
		 */
		num_bvecs = (remaining / (PAGE_SIZE >> SECTOR_SHIFT)) + 2;
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, bios(io->client));
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_private = io;
		bio->bi_destructor = dm_bio_destructor;
		bio->bi_max_vecs--;
		bio_set_region(bio, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}
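/*
 * Sizing example (assuming 4K pages and 512-byte sectors): for a region
 * with remaining == 1024 sectors, PAGE_SIZE >> SECTOR_SHIFT == 8, so
 * num_bvecs == 1024 / 8 + 2 == 130: one bvec per page, one spare for a
 * partial page and one hidden slot for bio_set_region().
 */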
static void dispatch_io(int rw, unsigned int num_regions,
			struct io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	if (sync)
		rw |= (1 << BIO_RW_SYNC);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
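/*
 * Illustrative example: a mirrored write to two regions calls do_region()
 * twice with the same payload, so dp is rewound from old_pages before
 * each call; without the rewind the second region would resume from
 * wherever the first region stopped consuming pages.
 */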
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io.error = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;
	io.client = client;

	dispatch_io(rw, num_regions, where, dp, &io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io.count) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (atomic_read(&io.count))
		return -EINTR;

	if (error_bits)
		*error_bits = io.error;

	return io.error ? -EIO : 0;
}
static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(io_pool(client), GFP_NOIO);
	io->error = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}
int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
	       struct page_list *pl, unsigned int offset,
	       unsigned long *error_bits)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
}
int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
		    struct bio_vec *bvec, unsigned long *error_bits)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
}
int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
		  void *data, unsigned long *error_bits)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return sync_io(NULL, num_regions, where, rw, &dp, error_bits);
}
int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
		struct page_list *pl, unsigned int offset,
		io_notify_fn fn, void *context)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return async_io(NULL, num_regions, where, rw, &dp, fn, context);
}
int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
		     struct bio_vec *bvec, io_notify_fn fn, void *context)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return async_io(NULL, num_regions, where, rw, &dp, fn, context);
}
int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
		   void *data, io_notify_fn fn, void *context)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return async_io(NULL, num_regions, where, rw, &dp, fn, context);
}
static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * New collapsed (a)synchronous interface
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
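/*
 * Usage sketch for the collapsed interface (illustrative; bdev, buffer
 * and the sector range are made-up values).  A NULL notify.fn selects
 * the synchronous path, a non-NULL one the asynchronous path:
 *
 *	struct io_region where = {
 *		.bdev	= bdev,
 *		.sector	= 0,
 *		.count	= 8,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw		= READ,
 *		.mem.type	= DM_IO_KMEM,
 *		.mem.ptr.addr	= buffer,
 *		.notify.fn	= NULL,		// synchronous
 *		.client		= client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &where, &error_bits);
 */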
EXPORT_SYMBOL(dm_io_get);
EXPORT_SYMBOL(dm_io_put);
EXPORT_SYMBOL(dm_io_sync);
EXPORT_SYMBOL(dm_io_async);
EXPORT_SYMBOL(dm_io_sync_bvec);
EXPORT_SYMBOL(dm_io_async_bvec);
EXPORT_SYMBOL(dm_io_sync_vm);
EXPORT_SYMBOL(dm_io_async_vm);