release/src-rt-6.x.4708/linux/linux-2.6.36/drivers/md/dm-io.c
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS BITS_PER_LONG

struct dm_io_client {
        mempool_t *pool;
        struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
        unsigned long error_bits;
        unsigned long eopnotsupp_bits;
        atomic_t count;
        struct task_struct *sleeper;
        struct dm_io_client *client;
        io_notify_fn callback;
        void *context;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

static unsigned int pages_to_ios(unsigned int pages)
{
        return 4 * pages;       /* too many ? */
}

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
        unsigned ios = pages_to_ios(num_pages);
        struct dm_io_client *client;

        client = kmalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return ERR_PTR(-ENOMEM);

        client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
        if (!client->pool)
                goto bad;

        client->bios = bioset_create(16, 0);
        if (!client->bios)
                goto bad;

        return client;

bad:
        if (client->pool)
                mempool_destroy(client->pool);
        kfree(client);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
        return mempool_resize(client->pool, pages_to_ios(num_pages),
                              GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);

void dm_io_client_destroy(struct dm_io_client *client)
{
        mempool_destroy(client->pool);
        bioset_free(client->bios);
        kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
                                       unsigned region)
{
        if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
                DMCRIT("Unaligned struct io pointer %p", io);
                BUG();
        }

        bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
                                            unsigned *region)
{
        unsigned long val = (unsigned long)bio->bi_private;

        *io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
        *region = val & (DM_IO_MAX_REGIONS - 1);
}

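/*
 * Worked example (illustrative): with BITS_PER_LONG == 64, 'struct io'
 * is 64-byte aligned, so the low six bits of its address are always zero.
 * store_io_and_region_in_bio() ORs a region number 0..63 into those bits;
 * above, 'val & -64UL' recovers the pointer and 'val & 63' the region.
 */
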
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
        if (error) {
                set_bit(region, &io->error_bits);
                if (error == -EOPNOTSUPP)
                        set_bit(region, &io->eopnotsupp_bits);
        }

        if (atomic_dec_and_test(&io->count)) {
                if (io->sleeper)
                        wake_up_process(io->sleeper);

                else {
                        unsigned long r = io->error_bits;
                        io_notify_fn fn = io->callback;
                        void *context = io->context;

                        mempool_free(io, io->client->pool);
                        fn(r, context);
                }
        }
}

static void endio(struct bio *bio, int error)
{
        struct io *io;
        unsigned region;

        if (error && bio_data_dir(bio) == READ)
                zero_fill_bio(bio);

        /*
         * The bio destructor in bio_put() may use the io object.
         */
        retrieve_io_and_region_from_bio(bio, &io, &region);

        bio_put(bio);

        dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
        void (*get_page)(struct dpages *dp,
                         struct page **p, unsigned long *len, unsigned *offset);
        void (*next_page)(struct dpages *dp);

        unsigned context_u;
        void *context_ptr;
};

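/*
 * get_page() reports the current page together with the offset into it
 * and the number of bytes usable from that offset; next_page() advances
 * to the following page.  dispatch_io() keeps a copy of this struct so
 * the iteration can be rewound for each region.
 */
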
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
                          struct page **p, unsigned long *len, unsigned *offset)
{
        unsigned o = dp->context_u;
        struct page_list *pl = (struct page_list *) dp->context_ptr;

        *p = pl->page;
        *len = PAGE_SIZE - o;
        *offset = o;
}

static void list_next_page(struct dpages *dp)
{
        struct page_list *pl = (struct page_list *) dp->context_ptr;
        dp->context_ptr = pl->next;
        dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
        dp->get_page = list_get_page;
        dp->next_page = list_next_page;
        dp->context_u = offset;
        dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
                          struct page **p, unsigned long *len, unsigned *offset)
{
        struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
        *p = bvec->bv_page;
        *len = bvec->bv_len;
        *offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
        struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
        dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
        dp->get_page = bvec_get_page;
        dp->next_page = bvec_next_page;
        dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
                        struct page **p, unsigned long *len, unsigned *offset)
{
        *p = vmalloc_to_page(dp->context_ptr);
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
        dp->context_ptr += PAGE_SIZE - dp->context_u;
        dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
        dp->get_page = vm_get_page;
        dp->next_page = vm_next_page;
        dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
        dp->context_ptr = data;
}

static void dm_bio_destructor(struct bio *bio)
{
        unsigned region;
        struct io *io;

        retrieve_io_and_region_from_bio(bio, &io, &region);

        bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
                        unsigned *offset)
{
        *p = virt_to_page(dp->context_ptr);
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
        dp->context_ptr += PAGE_SIZE - dp->context_u;
        dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
        dp->get_page = km_get_page;
        dp->next_page = km_next_page;
        dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
        dp->context_ptr = data;
}

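/*
 * The four dpages flavours above (page list, bvec, vmalloc'd memory and
 * kernel memory) correspond to the DM_IO_PAGE_LIST, DM_IO_BVEC, DM_IO_VMA
 * and DM_IO_KMEM memory types selected in dp_init() below.
 */
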
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
                      struct dpages *dp, struct io *io)
{
        struct bio *bio;
        struct page *page;
        unsigned long len;
        unsigned offset;
        unsigned num_bvecs;
        sector_t remaining = where->count;

        /*
         * where->count may be zero if rw holds a write barrier and we
         * need to send a zero-sized barrier.
         */
        do {
                /*
                 * Allocate a suitably sized bio.
                 */
                num_bvecs = dm_sector_div_up(remaining,
                                             (PAGE_SIZE >> SECTOR_SHIFT));
                num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
                bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
                bio->bi_sector = where->sector + (where->count - remaining);
                bio->bi_bdev = where->bdev;
                bio->bi_end_io = endio;
                bio->bi_destructor = dm_bio_destructor;
                store_io_and_region_in_bio(bio, io, region);

                /*
                 * Try and add as many pages as possible.
                 */
                while (remaining) {
                        dp->get_page(dp, &page, &len, &offset);
                        len = min(len, to_bytes(remaining));
                        if (!bio_add_page(bio, page, len, offset))
                                break;

                        offset = 0;
                        remaining -= to_sector(len);
                        dp->next_page(dp);
                }

                atomic_inc(&io->count);
                submit_bio(rw, bio);
        } while (remaining);
}

static void dispatch_io(int rw, unsigned int num_regions,
                        struct dm_io_region *where, struct dpages *dp,
                        struct io *io, int sync)
{
        int i;
        struct dpages old_pages = *dp;

        BUG_ON(num_regions > DM_IO_MAX_REGIONS);

        if (sync)
                rw |= REQ_SYNC | REQ_UNPLUG;

        /*
         * For multiple regions we need to be careful to rewind
         * the dp object for each call to do_region.
         */
        for (i = 0; i < num_regions; i++) {
                *dp = old_pages;
                if (where[i].count || (rw & REQ_HARDBARRIER))
                        do_region(rw, i, where + i, dp, io);
        }

        /*
         * Drop the extra reference that we were holding to avoid
         * the io being completed too early.
         */
        dec_count(io, 0, 0);
}

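/*
 * Synchronous path: 'io' lives on the caller's stack, io->sleeper is set
 * to the current task, and the caller sleeps until every bio has
 * completed.  If a barrier write fails with -EOPNOTSUPP, the request is
 * retried once without REQ_HARDBARRIER.
 */
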
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
                   struct dm_io_region *where, int rw, struct dpages *dp,
                   unsigned long *error_bits)
{
        /*
         * gcc <= 4.3 can't do the alignment for stack variables, so we must
         * align it on our own.
         * volatile prevents the optimizer from removing or reusing
         * "io_" field from the stack frame (allowed in ANSI C).
         */
        volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
        struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

        if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
                WARN_ON(1);
                return -EIO;
        }

retry:
        io->error_bits = 0;
        io->eopnotsupp_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
        io->sleeper = current;
        io->client = client;

        dispatch_io(rw, num_regions, where, dp, io, 1);

        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);

                if (!atomic_read(&io->count))
                        break;

                io_schedule();
        }
        set_current_state(TASK_RUNNING);

        if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
                rw &= ~REQ_HARDBARRIER;
                goto retry;
        }

        if (error_bits)
                *error_bits = io->error_bits;

        return io->error_bits ? -EIO : 0;
}

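/*
 * Asynchronous path: 'io' is allocated from the client's mempool and is
 * freed in dec_count() before the notify callback is invoked.
 */
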
static int async_io(struct dm_io_client *client, unsigned int num_regions,
                    struct dm_io_region *where, int rw, struct dpages *dp,
                    io_notify_fn fn, void *context)
{
        struct io *io;

        if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
                WARN_ON(1);
                fn(1, context);
                return -EIO;
        }

        io = mempool_alloc(client->pool, GFP_NOIO);
        io->error_bits = 0;
        io->eopnotsupp_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
        io->sleeper = NULL;
        io->client = client;
        io->callback = fn;
        io->context = context;

        dispatch_io(rw, num_regions, where, dp, io, 0);
        return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
        /* Set up dpages based on memory type */
        switch (io_req->mem.type) {
        case DM_IO_PAGE_LIST:
                list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
                break;

        case DM_IO_BVEC:
                bvec_dp_init(dp, io_req->mem.ptr.bvec);
                break;

        case DM_IO_VMA:
                vm_dp_init(dp, io_req->mem.ptr.vma);
                break;

        case DM_IO_KMEM:
                km_dp_init(dp, io_req->mem.ptr.addr);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
          struct dm_io_region *where, unsigned long *sync_error_bits)
{
        int r;
        struct dpages dp;

        r = dp_init(io_req, &dp);
        if (r)
                return r;

        if (!io_req->notify.fn)
                return sync_io(io_req->client, num_regions, where,
                               io_req->bi_rw, &dp, sync_error_bits);

        return async_io(io_req->client, num_regions, where, io_req->bi_rw,
                        &dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);

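/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a synchronous read of 8 sectors into a kernel buffer could be issued as
 *
 *      struct dm_io_region where = {
 *              .bdev   = bdev,
 *              .sector = 0,
 *              .count  = 8,
 *      };
 *      struct dm_io_request io_req = {
 *              .bi_rw        = READ,
 *              .mem.type     = DM_IO_KMEM,
 *              .mem.ptr.addr = buffer,
 *              .notify.fn    = NULL,
 *              .client       = client,
 *      };
 *      unsigned long error_bits;
 *      int r = dm_io(&io_req, 1, &where, &error_bits);
 *
 * 'bdev', 'buffer' and 'client' (from dm_io_client_create()) are assumed
 * to be set up by the caller.  A NULL notify.fn selects the sync_io()
 * path; on failure, error_bits has one bit set per failed region.
 */
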
int __init dm_io_init(void)
{
        _dm_io_cache = KMEM_CACHE(io, 0);
        if (!_dm_io_cache)
                return -ENOMEM;

        return 0;
}

void dm_io_exit(void)
{
        kmem_cache_destroy(_dm_io_cache);
        _dm_io_cache = NULL;
}