/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include "dm-bio-list.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <linux/smp_lock.h>

#define DM_MSG_PREFIX "core"
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	struct bio *bio;
	atomic_t io_count;
	unsigned long start_time;
};
/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};
union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}
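/*
 * Illustrative sketch, not part of dm core: a target that stashed a
 * per-io context pointer in map_info->ptr from its map function can
 * recover it later from the bio alone via dm_get_mapinfo().  The names
 * my_target_ctx and my_target_recover_ctx are hypothetical.
 */
struct my_target_ctx {
	sector_t start;
};

static struct my_target_ctx *my_target_recover_ctx(struct bio *bio)
{
	union map_info *info = dm_get_mapinfo(bio);

	return info ? info->ptr : NULL;
}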
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
struct mapped_device {
	struct rw_semaphore io_lock;
	struct semaphore suspend_lock;
	spinlock_t pushback_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	request_queue_t *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;
	struct bio_list pushback;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 * so that freeze/thaw match.
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;
};
#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static int __init local_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}
static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);

	if (unregister_blkdev(_major, _name) < 0)
		DMERR("unregister_blkdev failed");

	_major = 0;

	DMINFO("cleaned up");
}
int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_interface_init,
};

void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_interface_exit,
};
static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}
static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}
/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = inode->i_bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}
static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);

	return 0;
}
int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}
/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}
static int dm_blk_ioctl(struct inode *inode, struct file *file,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md;
	struct dm_table *map;
	struct dm_target *tgt;
	int r = -ENOTTY;

	/* We don't really need this lock, but we do need 'inode'. */
	unlock_kernel();

	md = inode->i_bdev->bd_disk->private_data;

	map = dm_get_table(md);

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, inode, file, cmd, arg);

out:
	dm_table_put(map);

	lock_kernel();
	return r;
}
static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;

	io->start_time = jiffies;

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
}
static int end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	preempt_disable();
	disk_round_stats(dm_disk(md));
	preempt_enable();
	dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);

	disk_stat_add(dm_disk(md), ticks[rw], duration);

	return !pending;
}
/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0; /* deferred successfully */
}
/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
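/*
 * Illustrative sketch, not part of dm core: the intended access pattern
 * is to take a reference with dm_get_table(), use the table, then drop
 * it again with dm_table_put().  count_targets() is a hypothetical
 * caller shown only to demonstrate the pairing.
 */
static unsigned count_targets(struct mapped_device *md)
{
	struct dm_table *t = dm_get_table(md);
	unsigned n = 0;

	if (t) {
		n = dm_table_get_num_targets(t);
		dm_table_put(t);
	}

	return n;
}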
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}
/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}
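/*
 * Worked example with illustrative values, not taken from dm core: with
 * cylinders = 1024, heads = 255 and sectors = 63, sz works out to
 * 1024 * 255 * 63 = 16450560 sectors, so any geo->start at or below that
 * value is accepted and the geometry is stored in md->geometry.
 */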
/*-----------------------------------------------------------------
 * A more elegant solution is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 * This must be handled before the sleeper on
			 * suspend queue merges the pushback list.
			 */
			spin_lock_irqsave(&io->md->pushback_lock, flags);
			if (__noflush_suspending(io->md))
				bio_list_add(&io->md->pushback, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&io->md->pushback_lock, flags);
		}

		if (end_io_acct(io))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		if (io->error != DM_ENDIO_REQUEUE) {
			blk_add_trace_bio(io->md->queue, io->bio,
					  BLK_TA_COMPLETE);

			bio_endio(io->bio, io->bio->bi_size, io->error);
		}

		free_io(io->md, io);
	}
}
static int clone_endio(struct bio *bio, unsigned int done, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (bio->bi_size)
		return 1;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return 1;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	dec_pending(tio->io, error);

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	return r;
}
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}
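/*
 * Worked example with illustrative numbers only: with ti->split_io = 8
 * and offset = 5, boundary = ((5 + 8) & ~7) - 5 = 8 - 5 = 3, i.e. the io
 * is clipped to 3 sectors so it does not cross the next split_io-aligned
 * boundary within the target.
 */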
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
				    tio->io->bio->bi_bdev->bd_dev, sector,
				    clone->bi_sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}
/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}
/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}
static void __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
	struct dm_target_io *tio;

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}
}
/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio, bio->bi_size);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/

/*
 * The request function: remaps and dispatches the bio built up by the
 * layers above us.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	/*
	 * There is no use in forwarding any barrier request since we can't
	 * guarantee it is (or can be) handled by the targets correctly.
	 */
	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
		return 0;
	}

	down_read(&md->io_lock);

	disk_stat_inc(dm_disk(md), ios[rw]);
	disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		if (!r)
			return 0; /* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	__split_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}
static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
			sector_t *error_sector)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	int ret = -ENXIO;

	if (map) {
		ret = dm_table_flush_all(map);
		dm_table_put(map);
	}

	return ret;
}
static void dm_unplug_all(request_queue_t *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);
	return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}
static int next_free_minor(struct mapped_device *md, int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}
static struct block_device_operations dm_blk_dops;
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad0;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(md, &minor);
	else
		r = specific_minor(md, minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->io_lock);
	init_MUTEX(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1_free_minor;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->bs = bioset_create(16, 16);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad4:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad3:
	mempool_destroy(md->io_pool);
bad2:
	blk_cleanup_queue(md->queue);
bad1_free_minor:
	free_minor(minor);
bad1:
	module_put(THIS_MODULE);
bad0:
	kfree(md);
	return NULL;
}
static void free_dev(struct mapped_device *md)
{
	int minor = md->disk->first_minor;

	if (md->suspended_bdev) {
		thaw_bdev(md->suspended_bdev, NULL);
		bdput(md->suspended_bdev);
	}
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	struct mapped_device *md = (struct mapped_device *) context;

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}
static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}
static int __bind(struct mapped_device *md, struct dm_table *t)
{
	request_queue_t *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->suspended_bdev)
		__set_size(md, size);
	if (size == 0)
		return 0;

	dm_table_get(t);
	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}
static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_put(map);
}
/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}
static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (dm_disk(md)->first_minor != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}
struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}
void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}
const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		__unbind(md);
		dm_table_put(map);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);
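/*
 * Illustrative sketch, not part of dm core: dm_get_md() returns the
 * mapped_device with a reference already taken via dm_get(), so a caller
 * that looks a device up by dev_t is expected to balance it with
 * dm_put() when done.  with_device() is a hypothetical helper shown only
 * to demonstrate the pairing.
 */
static int with_device(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);

	if (!md)
		return -ENXIO;

	DMINFO("found device %s", dm_device_name(md));
	dm_put(md);

	return 0;
}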
/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
	struct bio *n;

	while (c) {
		n = c->bi_next;
		c->bi_next = NULL;
		__split_bio(md, c);
		c = n;
	}
}
/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	down(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	up(&md->suspend_lock);
	return r;
}
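/*
 * Illustrative sketch, not part of dm core: the expected sequence for
 * replacing a live mapping is suspend -> swap -> resume, so the table
 * pointer only changes hands while the device is suspended.
 * replace_table() is a hypothetical caller with minimal error handling.
 */
static int replace_table(struct mapped_device *md, struct dm_table *new_map)
{
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	if (r)
		return r;

	r = dm_swap_table(md, new_map);
	if (!r)
		r = dm_resume(md);

	return r;
}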
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}
static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	unsigned long flags;
	DECLARE_WAITQUEUE(wait, current);
	struct bio *def;
	int r = -EINVAL;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	down(&md->suspend_lock);

	if (dm_suspended(md))
		goto out_unlock;

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto flush_and_out;
		}
	}

	/*
	 * Flush I/O to the device.
	 * noflush supersedes do_lockfs, because lock_fs() needs to flush I/Os.
	 */
	if (do_lockfs && !noflush) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	/* unplug */
	if (map)
		dm_table_unplug_all(map);

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	if (noflush) {
		spin_lock_irqsave(&md->pushback_lock, flags);
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
		bio_list_merge_head(&md->deferred, &md->pushback);
		bio_list_init(&md->pushback);
		spin_unlock_irqrestore(&md->pushback_lock, flags);
	}

	/* were we interrupted ? */
	r = -EINTR;
	if (atomic_read(&md->pending)) {
		clear_bit(DMF_BLOCK_IO, &md->flags);
		def = bio_list_get(&md->deferred);
		__flush_deferred_io(md, def);
		up_write(&md->io_lock);
		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}
	up_write(&md->io_lock);

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

	r = 0;

flush_and_out:
	if (r && noflush) {
		/*
		 * Because there may be already I/Os in the pushback list,
		 * flush them before return.
		 */
		down_write(&md->io_lock);

		spin_lock_irqsave(&md->pushback_lock, flags);
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
		bio_list_merge_head(&md->deferred, &md->pushback);
		bio_list_init(&md->pushback);
		spin_unlock_irqrestore(&md->pushback_lock, flags);

		def = bio_list_get(&md->deferred);
		__flush_deferred_io(md, def);
		up_write(&md->io_lock);
	}

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	up(&md->suspend_lock);
	return r;
}
int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct bio *def;
	struct dm_table *map = NULL;

	down(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	down_write(&md->io_lock);
	clear_bit(DMF_BLOCK_IO, &md->flags);

	def = bio_list_get(&md->deferred);
	__flush_deferred_io(md, def);
	up_write(&md->io_lock);

	unlock_fs(md);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	kobject_uevent(&md->disk->kobj, KOBJ_CHANGE);

	r = 0;

out:
	dm_table_put(map);
	up(&md->suspend_lock);

	return r;
}
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
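/*
 * Illustrative sketch, not part of dm core: an interface that wants to
 * block until the table generates an event would snapshot the counter
 * with dm_get_event_nr() and then sleep in dm_wait_event() until the
 * counter moves on.  wait_for_next_event() is a hypothetical caller.
 */
static int wait_for_next_event(struct mapped_device *md)
{
	uint32_t seen = dm_get_event_nr(md);

	/* returns -ERESTARTSYS if interrupted by a signal */
	return dm_wait_event(md, seen);
}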
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}
int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");