/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */
#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/* FIXME: can we shrink this ? */
struct io {
	unsigned long error_bits;
	unsigned long eopnotsupp_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
};
/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */
static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}
/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 0);
	if (!client->bios)
		goto bad;

	return client;

bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);
int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);
void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec, which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
	bio->bi_io_vec[bio->bi_max_vecs].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
	return bio->bi_io_vec[bio->bi_max_vecs].bv_len;
}
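/*
 * Note: do_region() below allocates one spare bvec and decrements
 * bi_max_vecs so that bio_add_page() never reaches the slot used by
 * bio_set_region(); endio() restores bi_max_vecs before bio_put() so
 * the bio is freed with its true vec count.
 */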
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error) {
		set_bit(region, &io->error_bits);
		if (error == -EOPNOTSUPP)
			set_bit(region, &io->eopnotsupp_bits);
	}

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);
		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}
static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	io = bio->bi_private;
	region = bio_get_region(bio);

	bio->bi_max_vecs++;
	bio_put(bio);

	dec_count(io, region, error);
}
/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
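/*
 * Illustrative sketch (not part of the original source): how a consumer
 * walks a dpages object.  do_region() below runs the real version of
 * this loop when packing pages into bios; the function name and the
 * byte count are hypothetical.
 */
static void __maybe_unused example_walk_dpages(struct dpages *dp,
					       unsigned long total)
{
	struct page *page;
	unsigned long len;
	unsigned offset;

	while (total) {
		dp->get_page(dp, &page, &len, &offset);	/* current page */
		len = min(len, total);
		/* ... use 'len' bytes of 'page' starting at 'offset' ... */
		total -= len;
		dp->next_page(dp);			/* advance */
	}
}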
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}
/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;

	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;

	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}
/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
static void dm_bio_destructor(struct bio *bio)
{
	struct io *io = bio->bi_private;

	bio_free(bio, io->client->bios);
}
/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
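/*
 * Note: the km_* helpers rely on virt_to_page() and so only work on
 * physically contiguous (kmalloc-style) memory; the vm_* helpers above
 * use vmalloc_to_page() to translate each page of a vmalloc'd buffer
 * individually.
 */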
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably sized bio: we add an extra
		 * bvec for bio_get/set_region() and decrement bi_max_vecs
		 * to hide it from bio_add_page().
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
				      num_bvecs);
		if (unlikely(num_bvecs > BIO_MAX_PAGES))
			num_bvecs = BIO_MAX_PAGES;
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_private = io;
		bio->bi_destructor = dm_bio_destructor;
		bio->bi_max_vecs--;
		bio_set_region(bio, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}
static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	if (sync)
		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

retry:
	io.error_bits = 0;
	io.eopnotsupp_bits = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;
	io.client = client;

	dispatch_io(rw, num_regions, where, dp, &io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io.count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (io.eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) {
		/* The device rejected the barrier: retry without it. */
		rw &= ~(1 << BIO_RW_BARRIER);
		goto retry;
	}

	if (error_bits)
		*error_bits = io.error_bits;

	return io.error_bits ? -EIO : 0;
}
static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}
static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
 * io_req->bi_rw.  If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
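/*
 * Illustrative usage (not part of the original source): a synchronous
 * one-page read into a kernel buffer.  Leaving notify.fn NULL selects
 * the sync_io() path above; 'client', 'bdev' and 'buffer' are assumed
 * to be set up by the caller, and the function name is hypothetical.
 */
static int __maybe_unused example_sync_read(struct dm_io_client *client,
					    struct block_device *bdev,
					    void *buffer)
{
	unsigned long error_bits = 0;
	struct dm_io_region where = {
		.bdev	= bdev,
		.sector	= 0,
		.count	= PAGE_SIZE >> SECTOR_SHIFT,
	};
	struct dm_io_request io_req = {
		.bi_rw		= READ,
		.mem.type	= DM_IO_KMEM,
		.mem.ptr.addr	= buffer,
		.notify.fn	= NULL,		/* NULL => synchronous */
		.client		= client,
	};

	return dm_io(&io_req, 1, &where, &error_bits);
}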