drivers/md/dm-io.c
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG
#define MIN_IOS		16
#define MIN_BIOS	16
struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));
static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(MIN_BIOS, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);
void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
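/*
 * Usage sketch for the two exports above (illustrative only, not part of
 * this file): a dm target typically creates one client for its lifetime and
 * destroys it on teardown.  The 't->io_client' field below is hypothetical.
 *
 *	t->io_client = dm_io_client_create();
 *	if (IS_ERR(t->io_client))
 *		return PTR_ERR(t->io_client);
 *	...
 *	dm_io_client_destroy(t->io_client);
 */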
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
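/*
 * Worked example of the packing above, assuming a 64-bit kernel where
 * DM_IO_MAX_REGIONS == BITS_PER_LONG == 64 (matching the one error bit per
 * region held in io->error_bits): 'struct io' is aligned to 64 bytes, so the
 * low 6 bits of its address are zero.  With io at 0x...a40 and region 5,
 * bi_private holds 0x...a45; masking with ~63UL recovers the pointer and
 * masking with 63 recovers the region number.
 */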
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}
/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
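/*
 * A dpages object is consumed with the pair of ops above: get_page() reports
 * the current page, the number of bytes usable in it and the starting offset;
 * next_page() advances to the following page.  A minimal sketch of the
 * pattern used by do_region() below:
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *	... use up to 'len' bytes of 'page', starting at 'offset' ...
 *	dp->next_page(dp);
 */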
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}
/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}
/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
static void dm_bio_destructor(struct bio *bio)
{
	unsigned region;
	struct io *io;

	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_free(bio, io->client->bios);
}
/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_destructor = dm_bio_destructor;
		store_io_and_region_in_bio(bio, io, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}
static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}
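/*
 * Note on the synchronous completion handshake above: io->sleeper is set to
 * 'current' before dispatch, and dec_count() calls wake_up_process() on it
 * when the final bio completes.  The loop re-checks io->count only after
 * setting TASK_UNINTERRUPTIBLE, so a wakeup that arrives between the check
 * and io_schedule() is not lost.
 */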
static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}
static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
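/*
 * Usage sketch for dm_io() (illustrative only; 'client', 'bdev' and 'buf'
 * are assumed to exist in the caller, and 'buf' must be directly mapped
 * kernel memory so virt_to_page() works on it):
 *
 *	struct dm_io_region where = {
 *		.bdev   = bdev,
 *		.sector = 0,
 *		.count  = 8,		(8 sectors = 4KiB)
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw = READ,
 *		.mem.type = DM_IO_KMEM,
 *		.mem.ptr.addr = buf,
 *		.notify.fn = NULL,	(NULL selects the sync_io() path)
 *		.client = client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &where, &error_bits);
 *
 * With notify.fn set, dm_io() returns immediately and the callback receives
 * the accumulated per-region error bits once every dispatched bio has
 * completed (the async_io() path).
 */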
int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}