MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / drivers/md/dm.c
blob 4d3c1d32adf5fae71dc86bc3b5f61d58b289d256
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	struct bio *bio;
	atomic_t io_count;
};

/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FS_LOCKED 2
struct mapped_device {
	struct rw_semaphore lock;
	rwlock_t map_lock;
	atomic_t holders;

	unsigned long flags;

	request_queue_t *queue;
	struct gendisk *disk;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
};

#define MIN_IOS 256
static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;
static int __init local_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_io_cache = kmem_cache_create("dm_io",
				      sizeof(struct dm_io), 0, 0, NULL, NULL);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io),
				       0, 0, NULL, NULL);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}
static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);

	if (unregister_blkdev(_major, _name) < 0)
		DMERR("devfs_unregister_blkdev failed");

	_major = 0;

	DMINFO("cleaned up");
}
/*
 * We have a lot of init/exit functions, so it seems easier to
 * store them in an array.  The disposable macro 'xx'
 * expands a prefix into a pair of function names.
 */
static struct {
	int (*init) (void);
	void (*exit) (void);

} _inits[] = {
#define xx(n) {n ## _init, n ## _exit},
	xx(local)
	xx(dm_target)
	xx(dm_linear)
	xx(dm_stripe)
	xx(dm_interface)
#undef xx
};
static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i].init();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_inits[i].exit();

	return r;
}
static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_inits);

	while (i--)
		_inits[i].exit();
}
/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_get(md);
	return 0;
}

static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_put(md);
	return 0;
}
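
/*
 * mempool-backed allocators for the per-bio and per-target tracking
 * structures.  GFP_NOIO keeps these allocations safe to call from
 * the I/O submission path.
 */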
static inline struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static inline void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static inline struct target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static inline void free_tio(struct mapped_device *md, struct target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}
/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->lock);
	return 0;		/* deferred successfully */
}
/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static inline void dec_pending(struct dm_io *io, int error)
{
	if (error)
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (atomic_dec_and_test(&io->md->pending))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		bio_endio(io->bio, io->bio->bi_size, io->error);
		free_io(io->md, io);
	}
}
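
/*
 * Completion handler for the cloned bios.  Under this kernel's
 * bi_end_io protocol it may be called for partial completions;
 * we only act once bi_size has dropped to zero.
 */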
static int clone_endio(struct bio *bio, unsigned int done, int error)
{
	int r = 0;
	struct target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (bio->bi_size)
		return 1;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0)
			error = r;

		else if (r > 0)
			/* the target wants another shot at the io */
			return 1;
	}

	free_tio(io->md, tio);
	dec_pending(io, error);
	bio_put(bio);
	return r;
}
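
/*
 * Returns the number of sectors that can be mapped to this target
 * starting at 'sector' without crossing the target (or, if set, its
 * split_io) boundary.
 */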
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = dm_round_up(offset + 1, ti->split_io) - offset;

		if (len > boundary)
			len = boundary;
	}

	return len;
}
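
/*
 * Hand a clone to its target's map function and dispatch or fail it
 * according to the return value.
 */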
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct target_io *tio)
{
	int r;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	r = ti->type->map(ti, clone, &tio->info);
	if (r > 0)
		/* the bio has been remapped so dispatch it */
		generic_make_request(clone);

	else if (r < 0) {
		/* error the io and bail out */
		struct dm_io *io = tio->io;
		free_tio(tio->io->md, tio);
		dec_pending(io, -EIO);
		bio_put(clone);
	}
}
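
/*
 * Walking state used while splitting a bio across targets.
 */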
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};
/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc(GFP_NOIO, 1);
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}
/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len)
{
	struct bio *clone;

	clone = bio_clone(bio, GFP_NOIO);
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}
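
/*
 * Map the part of ci->bio that falls within the current target:
 * either all of the remaining io in one clone, as many whole bvecs
 * as fit, or a single bvec split in two across the target boundary.
 */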
static void __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
	struct target_io *tio;

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Create two copy bios to deal with io that has
		 * been split across a target.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;

		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset, max);
		__map_bio(ti, clone, tio);

		ci->sector += max;
		ci->sector_count -= max;
		ti = dm_table_find_target(ci->map, ci->sector);

		len = to_sector(bv->bv_len) - max;
		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset + to_bytes(max), len);
		tio = alloc_tio(ci->md);
		tio->io = ci->io;
		tio->ti = ti;
		memset(&tio->info, 0, sizeof(tio->info));
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx++;
	}
}
/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio, bio->bi_size);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	atomic_inc(&md->pending);
	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	struct mapped_device *md = q->queuedata;

	down_read(&md->lock);

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;

		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->lock);
	}

	__split_bio(md, bio);
	up_read(&md->lock);
	return 0;
}
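
/*
 * issue_flush and unplug hooks for the queue; both simply forward
 * to the currently bound table.
 */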
static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
			sector_t *error_sector)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	int ret = -ENXIO;

	if (map) {
		ret = dm_table_flush_all(map);
		dm_table_put(map);
	}

	return ret;
}
static void dm_unplug_all(request_queue_t *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}
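
/*
 * Report the device as congested while io is blocked or no table is
 * bound; otherwise ask the table.
 */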
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);
	return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DECLARE_MUTEX(_minor_lock);
static DEFINE_IDR(_minor_idr);

static void free_minor(unsigned int minor)
{
	down(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	up(&_minor_lock);
}
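
/*
 * Note: the function pointers passed to idr_get_new_above()/idr_get_new()
 * below are stored only as opaque non-NULL tokens; the IDR is used purely
 * to reserve minor numbers.
 */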
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(unsigned int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	down(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, specific_minor, minor, &m);
	if (r) {
		goto out;
	}

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	up(&_minor_lock);
	return r;
}
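
/*
 * Allocate the next unused minor number.
 */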
static int next_free_minor(unsigned int *minor)
{
	int r;
	unsigned int m;

	down(&_minor_lock);

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new(&_minor_idr, next_free_minor, &m);
	if (r) {
		goto out;
	}

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	up(&_minor_lock);
	return r;
}
static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	/* get a minor number for the dev */
	r = persistent ? specific_minor(minor) : next_free_minor(&minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				     mempool_free_slab, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				      mempool_free_slab, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	return md;

bad4:
	mempool_destroy(md->tio_pool);
bad3:
	mempool_destroy(md->io_pool);
bad2:
	blk_put_queue(md->queue);
	free_minor(minor);
bad1:
	kfree(md);
	return NULL;
}
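
/*
 * Release everything set up by alloc_dev().
 */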
static void free_dev(struct mapped_device *md)
{
	free_minor(md->disk->first_minor);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	del_gendisk(md->disk);
	put_disk(md->disk);
	blk_put_queue(md->queue);
	kfree(md);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	struct mapped_device *md = (struct mapped_device *) context;

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct gendisk *disk, sector_t size)
{
	struct block_device *bdev;

	set_capacity(disk, size);
	bdev = bdget_disk(disk, 0);
	if (bdev) {
		down(&bdev->bd_inode->i_sem);
		i_size_write(bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
		up(&bdev->bd_inode->i_sem);
		bdput(bdev);
	}
}
static int __bind(struct mapped_device *md, struct dm_table *t)
{
	request_queue_t *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);
	__set_size(md->disk, size);
	if (size == 0)
		return 0;

	write_lock(&md->map_lock);
	md->map = t;
	write_unlock(&md->map_lock);

	dm_table_get(t);
	dm_table_event_callback(md->map, event_callback, md);
	dm_table_set_restrictions(t, q);
	return 0;
}
static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_put(map);
}
/*
 * Constructor for a new device.
 */
static int create_aux(unsigned int minor, int persistent,
		      struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor, persistent);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}

int dm_create(struct mapped_device **result)
{
	return create_aux(0, 0, result);
}

int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
{
	return create_aux(minor, 1, result);
}
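
/*
 * Reference counting for the mapped device.  The final dm_put()
 * suspends any live targets, unbinds the table and frees the device.
 */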
void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

void dm_put(struct mapped_device *md)
{
	struct dm_table *map = dm_get_table(md);

	if (atomic_dec_and_test(&md->holders)) {
		if (!test_bit(DMF_SUSPENDED, &md->flags) && map)
			dm_table_suspend_targets(map);
		__unbind(md);
		free_dev(md);
	}

	dm_table_put(map);
}
/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
	struct bio *n;

	while (c) {
		n = c->bi_next;
		c->bi_next = NULL;
		__split_bio(md, c);
		c = n;
	}
}
/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r;

	down_write(&md->lock);

	/* device must be suspended */
	if (!test_bit(DMF_SUSPENDED, &md->flags)) {
		up_write(&md->lock);
		return -EPERM;
	}

	__unbind(md);
	r = __bind(md, table);
	if (r) {
		up_write(&md->lock);
		return r;
	}

	up_write(&md->lock);
	return 0;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int __lock_fs(struct mapped_device *md)
{
	struct block_device *bdev;

	if (test_and_set_bit(DMF_FS_LOCKED, &md->flags))
		return 0;

	bdev = bdget_disk(md->disk, 0);
	if (!bdev) {
		DMWARN("bdget failed in __lock_fs");
		return -ENOMEM;
	}

	WARN_ON(md->frozen_sb);
	md->frozen_sb = freeze_bdev(bdev);
	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.  We'll bdput
	 * in __unlock_fs
	 */
	return 0;
}
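
/*
 * Note the two bdput() calls below: one drops the reference taken in
 * this function, the other drops the reference that __lock_fs()
 * deliberately kept.
 */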
static int __unlock_fs(struct mapped_device *md)
{
	struct block_device *bdev;

	if (!test_and_clear_bit(DMF_FS_LOCKED, &md->flags))
		return 0;

	bdev = bdget_disk(md->disk, 0);
	if (!bdev) {
		DMWARN("bdget failed in __unlock_fs");
		return -ENOMEM;
	}

	thaw_bdev(bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	bdput(bdev);
	bdput(bdev);
	return 0;
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md)
{
	struct dm_table *map;
	DECLARE_WAITQUEUE(wait, current);

	/* Flush I/O to the device. */
	down_read(&md->lock);
	if (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->lock);
		return -EINVAL;
	}

	__lock_fs(md);
	up_read(&md->lock);

	/*
	 * First we set the BLOCK_IO flag so no more ios will be
	 * mapped.
	 */
	down_write(&md->lock);
	if (test_bit(DMF_BLOCK_IO, &md->flags)) {
		/*
		 * If we get here we know another thread is
		 * trying to suspend as well, so we leave the fs
		 * locked for this thread.
		 */
		up_write(&md->lock);
		return -EINVAL;
	}

	set_bit(DMF_BLOCK_IO, &md->flags);
	add_wait_queue(&md->wait, &wait);
	up_write(&md->lock);

	/* unplug */
	map = dm_get_table(md);
	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->lock);
	remove_wait_queue(&md->wait, &wait);

	/* were we interrupted ? */
	if (atomic_read(&md->pending)) {
		__unlock_fs(md);
		clear_bit(DMF_BLOCK_IO, &md->flags);
		up_write(&md->lock);
		return -EINTR;
	}

	set_bit(DMF_SUSPENDED, &md->flags);

	map = dm_get_table(md);
	if (map)
		dm_table_suspend_targets(map);
	dm_table_put(map);
	up_write(&md->lock);

	return 0;
}
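
/*
 * Resume a suspended device: resume the targets, clear the suspend
 * and block-io flags, and resubmit any bios deferred while suspended.
 */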
int dm_resume(struct mapped_device *md)
{
	struct bio *def;
	struct dm_table *map = dm_get_table(md);

	down_write(&md->lock);
	if (!map ||
	    !test_bit(DMF_SUSPENDED, &md->flags) ||
	    !dm_table_get_size(map)) {
		up_write(&md->lock);
		dm_table_put(map);
		return -EINVAL;
	}

	dm_table_resume_targets(map);
	clear_bit(DMF_SUSPENDED, &md->flags);
	clear_bit(DMF_BLOCK_IO, &md->flags);

	def = bio_list_get(&md->deferred);
	__flush_deferred_io(md, def);
	up_write(&md->lock);
	__unlock_fs(md);
	dm_table_unplug_all(map);
	dm_table_put(map);

	return 0;
}
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}
static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.owner = THIS_MODULE
};

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <thornber@sistina.com>");
MODULE_LICENSE("GPL");