drivers/md/dm-io.c
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-io.h"

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
struct dm_io_client {
        mempool_t *pool;
        struct bio_set *bios;
};
/* FIXME: can we shrink this ? */
struct io {
        unsigned long error;
        atomic_t count;
        struct task_struct *sleeper;
        struct dm_io_client *client;
        io_notify_fn callback;
        void *context;
};
/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */
static unsigned int pages_to_ios(unsigned int pages)
{
        return 4 * pages;       /* too many ? */
}
/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
        unsigned ios = pages_to_ios(num_pages);
        struct dm_io_client *client;

        client = kmalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return ERR_PTR(-ENOMEM);

        client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
        if (!client->pool)
                goto bad;

        client->bios = bioset_create(16, 16);
        if (!client->bios)
                goto bad;

        return client;

   bad:
        if (client->pool)
                mempool_destroy(client->pool);
        kfree(client);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);
int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
        return mempool_resize(client->pool, pages_to_ios(num_pages),
                              GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);
void dm_io_client_destroy(struct dm_io_client *client)
{
        mempool_destroy(client->pool);
        bioset_free(client->bios);
        kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
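
/*
 * Usage sketch (hypothetical, not part of the upstream file): a caller
 * sizes the client for the number of pages it expects to have in flight
 * and checks the ERR_PTR-encoded result.  The page budget of 64 below
 * is illustrative only.
 */
static __maybe_unused int example_client_lifecycle(void)
{
        struct dm_io_client *client;

        client = dm_io_client_create(64);
        if (IS_ERR(client))
                return PTR_ERR(client);

        /* ... issue io with dm_io() using this client ... */

        dm_io_client_destroy(client);
        return 0;
}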
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec, which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
        bio->bi_io_vec[bio->bi_max_vecs].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
        return bio->bi_io_vec[bio->bi_max_vecs].bv_len;
}
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
        if (error)
                set_bit(region, &io->error);

        if (atomic_dec_and_test(&io->count)) {
                if (io->sleeper)
                        wake_up_process(io->sleeper);
                else {
                        int r = io->error;
                        io_notify_fn fn = io->callback;
                        void *context = io->context;

                        mempool_free(io, io->client->pool);
                        fn(r, context);
                }
        }
}
static int endio(struct bio *bio, unsigned int done, int error)
{
        struct io *io;
        unsigned region;

        /* keep going until we've finished */
        if (bio->bi_size)
                return 1;

        if (error && bio_data_dir(bio) == READ)
                zero_fill_bio(bio);

        /*
         * The bio destructor in bio_put() may use the io object.
         */
        io = bio->bi_private;
        region = bio_get_region(bio);

        bio->bi_max_vecs++;
        bio_put(bio);

        dec_count(io, region, error);

        return 0;
}
/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
        void (*get_page)(struct dpages *dp,
                         struct page **p, unsigned long *len, unsigned *offset);
        void (*next_page)(struct dpages *dp);

        unsigned context_u;
        void *context_ptr;
};
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
                          struct page **p, unsigned long *len, unsigned *offset)
{
        unsigned o = dp->context_u;
        struct page_list *pl = (struct page_list *) dp->context_ptr;

        *p = pl->page;
        *len = PAGE_SIZE - o;
        *offset = o;
}

static void list_next_page(struct dpages *dp)
{
        struct page_list *pl = (struct page_list *) dp->context_ptr;
        dp->context_ptr = pl->next;
        dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
        dp->get_page = list_get_page;
        dp->next_page = list_next_page;
        dp->context_u = offset;
        dp->context_ptr = pl;
}
/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
                          struct page **p, unsigned long *len, unsigned *offset)
{
        struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
        *p = bvec->bv_page;
        *len = bvec->bv_len;
        *offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
        struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
        dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
        dp->get_page = bvec_get_page;
        dp->next_page = bvec_next_page;
        dp->context_ptr = bvec;
}
/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
                        struct page **p, unsigned long *len, unsigned *offset)
{
        *p = vmalloc_to_page(dp->context_ptr);
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
        dp->context_ptr += PAGE_SIZE - dp->context_u;
        dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
        dp->get_page = vm_get_page;
        dp->next_page = vm_next_page;
        dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
        dp->context_ptr = data;
}
static void dm_bio_destructor(struct bio *bio)
{
        struct io *io = bio->bi_private;

        bio_free(bio, io->client->bios);
}
/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
                        unsigned *offset)
{
        *p = virt_to_page(dp->context_ptr);
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
        dp->context_ptr += PAGE_SIZE - dp->context_u;
        dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
        dp->get_page = km_get_page;
        dp->next_page = km_next_page;
        dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
        dp->context_ptr = data;
}
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned int region, struct io_region *where,
                      struct dpages *dp, struct io *io)
{
        struct bio *bio;
        struct page *page;
        unsigned long len;
        unsigned offset;
        unsigned num_bvecs;
        sector_t remaining = where->count;

        while (remaining) {
                /*
                 * Allocate a suitably sized bio: we add an extra
                 * bvec for bio_get/set_region() and decrement bi_max_vecs
                 * to hide it from bio_add_page().
                 */
                num_bvecs = dm_sector_div_up(remaining,
                                             (PAGE_SIZE >> SECTOR_SHIFT));
                num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
                                      num_bvecs);
                bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
                bio->bi_sector = where->sector + (where->count - remaining);
                bio->bi_bdev = where->bdev;
                bio->bi_end_io = endio;
                bio->bi_private = io;
                bio->bi_destructor = dm_bio_destructor;
                bio->bi_max_vecs--;
                bio_set_region(bio, region);

                /*
                 * Try and add as many pages as possible.
                 */
                while (remaining) {
                        dp->get_page(dp, &page, &len, &offset);
                        len = min(len, to_bytes(remaining));
                        if (!bio_add_page(bio, page, len, offset))
                                break;

                        offset = 0;
                        remaining -= to_sector(len);
                        dp->next_page(dp);
                }

                atomic_inc(&io->count);
                submit_bio(rw, bio);
        }
}
static void dispatch_io(int rw, unsigned int num_regions,
                        struct io_region *where, struct dpages *dp,
                        struct io *io, int sync)
{
        int i;
        struct dpages old_pages = *dp;

        if (sync)
                rw |= (1 << BIO_RW_SYNC);

        /*
         * For multiple regions we need to be careful to rewind
         * the dp object for each call to do_region.
         */
        for (i = 0; i < num_regions; i++) {
                *dp = old_pages;
                if (where[i].count)
                        do_region(rw, i, where + i, dp, io);
        }

        /*
         * Drop the extra reference that we were holding to avoid
         * the io being completed too early.
         */
        dec_count(io, 0, 0);
}
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
                   struct io_region *where, int rw, struct dpages *dp,
                   unsigned long *error_bits)
{
        struct io io;

        if (num_regions > 1 && rw != WRITE) {
                WARN_ON(1);
                return -EIO;
        }

        io.error = 0;
        atomic_set(&io.count, 1); /* see dispatch_io() */
        io.sleeper = current;
        io.client = client;

        dispatch_io(rw, num_regions, where, dp, &io, 1);

        while (1) {
                set_current_state(TASK_UNINTERRUPTIBLE);

                if (!atomic_read(&io.count) || signal_pending(current))
                        break;

                io_schedule();
        }
        set_current_state(TASK_RUNNING);

        if (atomic_read(&io.count))
                return -EINTR;

        if (error_bits)
                *error_bits = io.error;

        return io.error ? -EIO : 0;
}
static int async_io(struct dm_io_client *client, unsigned int num_regions,
                    struct io_region *where, int rw, struct dpages *dp,
                    io_notify_fn fn, void *context)
{
        struct io *io;

        if (num_regions > 1 && rw != WRITE) {
                WARN_ON(1);
                fn(1, context);
                return -EIO;
        }

        io = mempool_alloc(client->pool, GFP_NOIO);
        io->error = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
        io->sleeper = NULL;
        io->client = client;
        io->callback = fn;
        io->context = context;

        dispatch_io(rw, num_regions, where, dp, io, 0);
        return 0;
}
static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
        /* Set up dpages based on memory type */
        switch (io_req->mem.type) {
        case DM_IO_PAGE_LIST:
                list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
                break;

        case DM_IO_BVEC:
                bvec_dp_init(dp, io_req->mem.ptr.bvec);
                break;

        case DM_IO_VMA:
                vm_dp_init(dp, io_req->mem.ptr.vma);
                break;

        case DM_IO_KMEM:
                km_dp_init(dp, io_req->mem.ptr.addr);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}
/*
 * New collapsed (a)synchronous interface
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
          struct io_region *where, unsigned long *sync_error_bits)
{
        int r;
        struct dpages dp;

        r = dp_init(io_req, &dp);
        if (r)
                return r;

        if (!io_req->notify.fn)
                return sync_io(io_req->client, num_regions, where,
                               io_req->bi_rw, &dp, sync_error_bits);

        return async_io(io_req->client, num_regions, where, io_req->bi_rw,
                        &dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
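
/*
 * Usage sketch (hypothetical, not part of the upstream file): a
 * synchronous read of one 4KiB region into kernel memory.  Leaving
 * notify.fn NULL selects the sync_io() path above; pointing it at an
 * io_notify_fn (with a context) would dispatch through async_io()
 * instead.  "bdev", "buffer" and "client" are the caller's own.
 */
static __maybe_unused int example_sync_read(struct dm_io_client *client,
                                            struct block_device *bdev,
                                            void *buffer)
{
        unsigned long error_bits;
        struct io_region where = {
                .bdev = bdev,
                .sector = 0,
                .count = 8,                     /* 4KiB in 512-byte sectors */
        };
        struct dm_io_request io_req = {
                .bi_rw = READ,
                .mem.type = DM_IO_KMEM,         /* buffer from kmalloc() */
                .mem.ptr.addr = buffer,
                .notify.fn = NULL,              /* NULL => synchronous */
                .client = client,
        };

        return dm_io(&io_req, 1, &where, &error_bits);
}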