/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include "dm-bio-list.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <trace/block.h>
#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * One of these is allocated per bio.
 */
struct dm_io {
        struct mapped_device *md;
        int error;
        atomic_t io_count;
        struct bio *bio;
        unsigned long start_time;
};
/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
        struct dm_io *io;
        struct dm_target *ti;
        union map_info info;
};

DEFINE_TRACE(block_bio_complete);
/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
        struct mapped_device *md;
        struct request *orig, clone;
};
/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
        struct bio *orig;
        struct dm_rq_target_io *tio;
};
union map_info *dm_get_mapinfo(struct bio *bio)
{
        if (bio && bio->bi_private)
                return &((struct dm_target_io *)bio->bi_private)->info;
        return NULL;
}
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
        struct rw_semaphore io_lock;
        struct mutex suspend_lock;
        rwlock_t map_lock;
        atomic_t holders;
        atomic_t open_count;

        unsigned long flags;

        struct request_queue *queue;
        struct gendisk *disk;
        char name[16];

        void *interface_ptr;

        /*
         * A list of ios that arrived while we were suspended.
         */
        atomic_t pending;
        wait_queue_head_t wait;
        struct work_struct work;
        struct bio_list deferred;
        spinlock_t deferred_lock;

        /*
         * Processing queue (flush/barriers)
         */
        struct workqueue_struct *wq;

        /*
         * The current mapping.
         */
        struct dm_table *map;

        /*
         * io objects are allocated from here.
         */
        mempool_t *io_pool;
        mempool_t *tio_pool;

        struct bio_set *bs;

        /*
         * Event handling.
         */
        atomic_t event_nr;
        wait_queue_head_t eventq;
        atomic_t uevent_seq;
        struct list_head uevent_list;
        spinlock_t uevent_lock; /* Protect access to uevent_list */

        /*
         * freeze/thaw support require holding onto a super block
         */
        struct super_block *frozen_sb;
        struct block_device *suspended_bdev;

        /* forced geometry settings */
        struct hd_geometry geometry;

        /* sysfs handle */
        struct kobject kobj;
};
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;
static int __init local_init(void)
{
        int r = -ENOMEM;

        /* allocate a slab for the dm_ios */
        _io_cache = KMEM_CACHE(dm_io, 0);
        if (!_io_cache)
                return r;

        /* allocate a slab for the target ios */
        _tio_cache = KMEM_CACHE(dm_target_io, 0);
        if (!_tio_cache)
                goto out_free_io_cache;

        _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
        if (!_rq_tio_cache)
                goto out_free_tio_cache;

        _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
        if (!_rq_bio_info_cache)
                goto out_free_rq_tio_cache;

        r = dm_uevent_init();
        if (r)
                goto out_free_rq_bio_info_cache;

        _major = major;
        r = register_blkdev(_major, _name);
        if (r < 0)
                goto out_uevent_exit;

        if (!_major)
                _major = r;

        return 0;

out_uevent_exit:
        dm_uevent_exit();
out_free_rq_bio_info_cache:
        kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
        kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
        kmem_cache_destroy(_tio_cache);
out_free_io_cache:
        kmem_cache_destroy(_io_cache);

        return r;
}
static void local_exit(void)
{
        kmem_cache_destroy(_rq_bio_info_cache);
        kmem_cache_destroy(_rq_tio_cache);
        kmem_cache_destroy(_tio_cache);
        kmem_cache_destroy(_io_cache);
        unregister_blkdev(_major, _name);
        dm_uevent_exit();

        _major = 0;

        DMINFO("cleaned up");
}
static int (*_inits[])(void) __initdata = {
        local_init,
        dm_target_init,
        dm_linear_init,
        dm_stripe_init,
        dm_kcopyd_init,
        dm_interface_init,
};

static void (*_exits[])(void) = {
        local_exit,
        dm_target_exit,
        dm_linear_exit,
        dm_stripe_exit,
        dm_kcopyd_exit,
        dm_interface_exit,
};
static int __init dm_init(void)
{
        const int count = ARRAY_SIZE(_inits);
        int r, i;

        for (i = 0; i < count; i++) {
                r = _inits[i]();
                if (r)
                        goto bad;
        }

        return 0;

bad:
        while (i--)
                _exits[i]();

        return r;
}
static void __exit dm_exit(void)
{
        int i = ARRAY_SIZE(_exits);

        while (i--)
                _exits[i]();
}
/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mapped_device *md;

        spin_lock(&_minor_lock);

        md = bdev->bd_disk->private_data;
        if (!md)
                goto out;

        if (test_bit(DMF_FREEING, &md->flags) ||
            test_bit(DMF_DELETING, &md->flags)) {
                md = NULL;
                goto out;
        }

        dm_get(md);
        atomic_inc(&md->open_count);

out:
        spin_unlock(&_minor_lock);

        return md ? 0 : -ENXIO;
}
static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
        struct mapped_device *md = disk->private_data;

        atomic_dec(&md->open_count);
        dm_put(md);

        return 0;
}
int dm_open_count(struct mapped_device *md)
{
        return atomic_read(&md->open_count);
}
/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
        int r = 0;

        spin_lock(&_minor_lock);

        if (dm_open_count(md))
                r = -EBUSY;
        else
                set_bit(DMF_DELETING, &md->flags);

        spin_unlock(&_minor_lock);

        return r;
}
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mapped_device *md = bdev->bd_disk->private_data;

        return dm_get_geometry(md, geo);
}
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        struct dm_table *map = dm_get_table(md);
        struct dm_target *tgt;
        int r = -ENOTTY;

        if (!map || !dm_table_get_size(map))
                goto out;

        /* We only support devices that have a single target */
        if (dm_table_get_num_targets(map) != 1)
                goto out;

        tgt = dm_table_get_target(map, 0);

        if (dm_suspended(md)) {
                r = -EAGAIN;
                goto out;
        }

        if (tgt->type->ioctl)
                r = tgt->type->ioctl(tgt, cmd, arg);

out:
        dm_table_put(map);

        return r;
}
static struct dm_io *alloc_io(struct mapped_device *md)
{
        return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
        mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
        return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
        mempool_free(tio, md->tio_pool);
}
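/*
 * Per-device I/O accounting: each original bio is charged against the
 * gendisk's part0 statistics and counted in md->pending while in flight.
 */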
static void start_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        int cpu;

        io->start_time = jiffies;

        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        part_stat_unlock();
        dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}
static void end_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        struct bio *bio = io->bio;
        unsigned long duration = jiffies - io->start_time;
        int pending, cpu;
        int rw = bio_data_dir(bio);

        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
        part_stat_unlock();

        dm_disk(md)->part0.in_flight = pending =
                atomic_dec_return(&md->pending);

        /* nudge anyone waiting on suspend queue */
        if (!pending)
                wake_up(&md->wait);
}
/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
        down_write(&md->io_lock);

        if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
                up_write(&md->io_lock);
                return 1;
        }

        spin_lock_irq(&md->deferred_lock);
        bio_list_add(&md->deferred, bio);
        spin_unlock_irq(&md->deferred_lock);

        up_write(&md->io_lock);
        return 0;               /* deferred successfully */
}
/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
        struct dm_table *t;

        read_lock(&md->map_lock);
        t = md->map;
        if (t)
                dm_table_get(t);
        read_unlock(&md->map_lock);

        return t;
}
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        *geo = md->geometry;

        return 0;
}
/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

        if (geo->start > sz) {
                DMWARN("Start sector is beyond the geometry limits.");
                return -EINVAL;
        }

        md->geometry = *geo;

        return 0;
}
/*-----------------------------------------------------------------
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
        return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static void dec_pending(struct dm_io *io, int error)
{
        unsigned long flags;
        int io_error;
        struct bio *bio;
        struct mapped_device *md = io->md;

        /* Push-back supersedes any I/O errors */
        if (error && !(io->error > 0 && __noflush_suspending(md)))
                io->error = error;

        if (atomic_dec_and_test(&io->io_count)) {
                if (io->error == DM_ENDIO_REQUEUE) {
                        /*
                         * Target requested pushing back the I/O.
                         */
                        spin_lock_irqsave(&md->deferred_lock, flags);
                        if (__noflush_suspending(md))
                                bio_list_add(&md->deferred, io->bio);
                        else
                                /* noflush suspend was interrupted. */
                                io->error = -EIO;
                        spin_unlock_irqrestore(&md->deferred_lock, flags);
                }

                end_io_acct(io);

                io_error = io->error;
                bio = io->bio;

                free_io(md, io);

                if (io_error != DM_ENDIO_REQUEUE) {
                        trace_block_bio_complete(md->queue, bio);

                        bio_endio(bio, io_error);
                }
        }
}
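/*
 * Completion handler for every clone bio: give the target's optional
 * end_io hook a chance to retry or requeue, then drop one reference
 * on the original dm_io via dec_pending().
 */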
static void clone_endio(struct bio *bio, int error)
{
        int r = 0;
        struct dm_target_io *tio = bio->bi_private;
        struct dm_io *io = tio->io;
        struct mapped_device *md = tio->io->md;
        dm_endio_fn endio = tio->ti->type->end_io;

        if (!bio_flagged(bio, BIO_UPTODATE) && !error)
                error = -EIO;

        if (endio) {
                r = endio(tio->ti, bio, error, &tio->info);
                if (r < 0 || r == DM_ENDIO_REQUEUE)
                        /*
                         * error and requeue request are handled
                         * in dec_pending().
                         */
                        error = r;
                else if (r == DM_ENDIO_INCOMPLETE)
                        /* The target will handle the io */
                        return;
                else if (r) {
                        DMWARN("unimplemented target endio return value: %d", r);
                        BUG();
                }
        }

        /*
         * Store md for cleanup instead of tio which is about to get freed.
         */
        bio->bi_private = md->bs;

        free_tio(md, tio);
        bio_put(bio);
        dec_pending(io, error);
}
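/*
 * Return the number of sectors, starting at 'sector', that target 'ti'
 * can handle without crossing the end of the target or, if set, the
 * target's split_io boundary.
 */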
static sector_t max_io_len(struct mapped_device *md,
                           sector_t sector, struct dm_target *ti)
{
        sector_t offset = sector - ti->begin;
        sector_t len = ti->len - offset;

        /*
         * Does the target need to split even further ?
         */
        if (ti->split_io) {
                sector_t boundary;
                boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
                           - offset;
                if (len > boundary)
                        len = boundary;
        }

        return len;
}
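/*
 * Hand one clone to the target's map function and dispatch it, or clean
 * up if the target returned an error or asked for a requeue.
 */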
static void __map_bio(struct dm_target *ti, struct bio *clone,
                      struct dm_target_io *tio)
{
        int r;
        sector_t sector;
        struct mapped_device *md;

        BUG_ON(!clone->bi_size);

        clone->bi_end_io = clone_endio;
        clone->bi_private = tio;

        /*
         * Map the clone.  If r == 0 we don't need to do
         * anything, the target has assumed ownership of
         * this io.
         */
        atomic_inc(&tio->io->io_count);
        sector = clone->bi_sector;
        r = ti->type->map(ti, clone, &tio->info);
        if (r == DM_MAPIO_REMAPPED) {
                /* the bio has been remapped so dispatch it */

                trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
                                  tio->io->bio->bi_bdev->bd_dev,
                                  clone->bi_sector, sector);

                generic_make_request(clone);
        } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
                /* error the io and bail out, or requeue it if needed */
                md = tio->io->md;
                dec_pending(tio->io, r);
                /*
                 * Store bio_set for cleanup.
                 */
                clone->bi_private = md->bs;
                bio_put(clone);
                free_tio(md, tio);
        } else if (r) {
                DMWARN("unimplemented target map return value: %d", r);
                BUG();
        }
}
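/* State carried across __clone_and_map() while one bio is split up. */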
struct clone_info {
        struct mapped_device *md;
        struct dm_table *map;
        struct bio *bio;
        struct dm_io *io;
        sector_t sector;
        sector_t sector_count;
        unsigned short idx;
};
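/* Clones stash their bio_set in bi_private so they can free themselves. */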
static void dm_bio_destructor(struct bio *bio)
{
        struct bio_set *bs = bio->bi_private;

        bio_free(bio, bs);
}
/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
                              unsigned short idx, unsigned int offset,
                              unsigned int len, struct bio_set *bs)
{
        struct bio *clone;
        struct bio_vec *bv = bio->bi_io_vec + idx;

        clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
        clone->bi_destructor = dm_bio_destructor;
        *clone->bi_io_vec = *bv;

        clone->bi_sector = sector;
        clone->bi_bdev = bio->bi_bdev;
        clone->bi_rw = bio->bi_rw;
        clone->bi_vcnt = 1;
        clone->bi_size = to_bytes(len);
        clone->bi_io_vec->bv_offset = offset;
        clone->bi_io_vec->bv_len = clone->bi_size;
        clone->bi_flags |= 1 << BIO_CLONED;

        return clone;
}
/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
                             unsigned short idx, unsigned short bv_count,
                             unsigned int len, struct bio_set *bs)
{
        struct bio *clone;

        clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
        __bio_clone(clone, bio);
        clone->bi_destructor = dm_bio_destructor;
        clone->bi_sector = sector;
        clone->bi_idx = idx;
        clone->bi_vcnt = idx + bv_count;
        clone->bi_size = to_bytes(len);
        clone->bi_flags &= ~(1 << BIO_SEG_VALID);

        return clone;
}
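/*
 * Map the bvecs at ci->idx onwards onto the target that covers
 * ci->sector: either the whole remainder fits in one clone, or we take
 * as many complete bvecs as fit, or a single bvec has to be split
 * across two or more targets.
 */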
static int __clone_and_map(struct clone_info *ci)
{
        struct bio *clone, *bio = ci->bio;
        struct dm_target *ti;
        sector_t len = 0, max;
        struct dm_target_io *tio;

        ti = dm_table_find_target(ci->map, ci->sector);
        if (!dm_target_is_valid(ti))
                return -EIO;

        max = max_io_len(ci->md, ci->sector, ti);

        /*
         * Allocate a target io object.
         */
        tio = alloc_tio(ci->md);
        tio->io = ci->io;
        tio->ti = ti;
        memset(&tio->info, 0, sizeof(tio->info));

        if (ci->sector_count <= max) {
                /*
                 * Optimise for the simple case where we can do all of
                 * the remaining io with a single clone.
                 */
                clone = clone_bio(bio, ci->sector, ci->idx,
                                  bio->bi_vcnt - ci->idx, ci->sector_count,
                                  ci->md->bs);
                __map_bio(ti, clone, tio);
                ci->sector_count = 0;

        } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
                /*
                 * There are some bvecs that don't span targets.
                 * Do as many of these as possible.
                 */
                int i;
                sector_t remaining = max;
                sector_t bv_len;

                for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
                        bv_len = to_sector(bio->bi_io_vec[i].bv_len);

                        if (bv_len > remaining)
                                break;

                        remaining -= bv_len;
                        len += bv_len;
                }

                clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
                                  ci->md->bs);
                __map_bio(ti, clone, tio);

                ci->sector += len;
                ci->sector_count -= len;
                ci->idx = i;

        } else {
                /*
                 * Handle a bvec that must be split between two or more targets.
                 */
                struct bio_vec *bv = bio->bi_io_vec + ci->idx;
                sector_t remaining = to_sector(bv->bv_len);
                unsigned int offset = 0;

                do {
                        if (offset) {
                                ti = dm_table_find_target(ci->map, ci->sector);
                                if (!dm_target_is_valid(ti))
                                        return -EIO;

                                max = max_io_len(ci->md, ci->sector, ti);

                                tio = alloc_tio(ci->md);
                                tio->io = ci->io;
                                tio->ti = ti;
                                memset(&tio->info, 0, sizeof(tio->info));
                        }

                        len = min(remaining, max);

                        clone = split_bvec(bio, ci->sector, ci->idx,
                                           bv->bv_offset + offset, len,
                                           ci->md->bs);

                        __map_bio(ti, clone, tio);

                        ci->sector += len;
                        ci->sector_count -= len;
                        offset += to_bytes(len);
                } while (remaining -= len);

                ci->idx++;
        }

        return 0;
}
/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
        struct clone_info ci;
        int error = 0;

        ci.map = dm_get_table(md);
        if (unlikely(!ci.map)) {
                bio_io_error(bio);
                return;
        }
        if (unlikely(bio_barrier(bio) && !dm_table_barrier_ok(ci.map))) {
                dm_table_put(ci.map);
                bio_endio(bio, -EOPNOTSUPP);
                return;
        }
        ci.md = md;
        ci.bio = bio;
        ci.io = alloc_io(md);
        ci.io->error = 0;
        atomic_set(&ci.io->io_count, 1);
        ci.io->bio = bio;
        ci.io->md = md;
        ci.sector = bio->bi_sector;
        ci.sector_count = bio_sectors(bio);
        ci.idx = bio->bi_idx;

        start_io_acct(ci.io);
        while (ci.sector_count && !error)
                error = __clone_and_map(&ci);

        /* drop the extra reference count */
        dec_pending(ci.io, error);
        dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
                         struct bvec_merge_data *bvm,
                         struct bio_vec *biovec)
{
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);
        struct dm_target *ti;
        sector_t max_sectors;
        int max_size = 0;

        if (unlikely(!map))
                goto out;

        ti = dm_table_find_target(map, bvm->bi_sector);
        if (!dm_target_is_valid(ti))
                goto out_table;

        /*
         * Find maximum amount of I/O that won't need splitting
         */
        max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
                          (sector_t) BIO_MAX_SECTORS);
        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
        if (max_size < 0)
                max_size = 0;

        /*
         * merge_bvec_fn() returns number of bytes
         * it can accept at this offset
         * max is precomputed maximal io size
         */
        if (max_size && ti->type->merge)
                max_size = ti->type->merge(ti, bvm, biovec, max_size);

out_table:
        dm_table_put(map);

out:
        /*
         * Always allow an entire first page
         */
        if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
                max_size = biovec->bv_len;

        return max_size;
}
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
        int r = -EIO;
        int rw = bio_data_dir(bio);
        struct mapped_device *md = q->queuedata;
        int cpu;

        down_read(&md->io_lock);

        cpu = part_stat_lock();
        part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
        part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
        part_stat_unlock();

        /*
         * If we're suspended we have to queue
         * this io for later.
         */
        while (test_bit(DMF_BLOCK_IO, &md->flags)) {
                up_read(&md->io_lock);

                if (bio_rw(bio) != READA)
                        r = queue_io(md, bio);

                if (r <= 0)
                        goto out_req;

                /*
                 * We're in a while loop, because someone could suspend
                 * before we get to the following read lock.
                 */
                down_read(&md->io_lock);
        }

        __split_and_process_bio(md, bio);
        up_read(&md->io_lock);
        return 0;

out_req:
        if (r < 0)
                bio_io_error(bio);

        return 0;
}
static void dm_unplug_all(struct request_queue *q)
{
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);

        if (map) {
                dm_table_unplug_all(map);
                dm_table_put(map);
        }
}
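/*
 * backing_dev_info congestion callback: ask the live table, unless I/O
 * is currently blocked for a suspend.
 */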
static int dm_any_congested(void *congested_data, int bdi_bits)
{
        int r = bdi_bits;
        struct mapped_device *md = congested_data;
        struct dm_table *map;

        if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
                map = dm_get_table(md);
                if (map) {
                        r = dm_table_any_congested(map, bdi_bits);
                        dm_table_put(map);
                }
        }

        return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
        spin_lock(&_minor_lock);
        idr_remove(&_minor_idr, minor);
        spin_unlock(&_minor_lock);
}
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
        int r, m;

        if (minor >= (1 << MINORBITS))
                return -EINVAL;

        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
        if (!r)
                return -ENOMEM;

        spin_lock(&_minor_lock);

        if (idr_find(&_minor_idr, minor)) {
                r = -EBUSY;
                goto out;
        }

        r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
        if (r)
                goto out;

        if (m != minor) {
                idr_remove(&_minor_idr, m);
                r = -EBUSY;
        }

out:
        spin_unlock(&_minor_lock);
        return r;
}
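/* Allocate the next unused minor number. */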
static int next_free_minor(int *minor)
{
        int r, m;

        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
        if (!r)
                return -ENOMEM;

        spin_lock(&_minor_lock);

        r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
        if (r)
                goto out;

        if (m >= (1 << MINORBITS)) {
                idr_remove(&_minor_idr, m);
                r = -ENOSPC;
                goto out;
        }

        *minor = m;

out:
        spin_unlock(&_minor_lock);
        return r;
}
static struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
        int r;
        struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
        void *old_md;

        if (!md) {
                DMWARN("unable to allocate device, out of memory.");
                return NULL;
        }

        if (!try_module_get(THIS_MODULE))
                goto bad_module_get;

        /* get a minor number for the dev */
        if (minor == DM_ANY_MINOR)
                r = next_free_minor(&minor);
        else
                r = specific_minor(minor);
        if (r < 0)
                goto bad_minor;

        init_rwsem(&md->io_lock);
        mutex_init(&md->suspend_lock);
        spin_lock_init(&md->deferred_lock);
        rwlock_init(&md->map_lock);
        atomic_set(&md->holders, 1);
        atomic_set(&md->open_count, 0);
        atomic_set(&md->event_nr, 0);
        atomic_set(&md->uevent_seq, 0);
        INIT_LIST_HEAD(&md->uevent_list);
        spin_lock_init(&md->uevent_lock);

        md->queue = blk_alloc_queue(GFP_KERNEL);
        if (!md->queue)
                goto bad_queue;

        md->queue->queuedata = md;
        md->queue->backing_dev_info.congested_fn = dm_any_congested;
        md->queue->backing_dev_info.congested_data = md;
        blk_queue_make_request(md->queue, dm_request);
        blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
        blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
        md->queue->unplug_fn = dm_unplug_all;
        blk_queue_merge_bvec(md->queue, dm_merge_bvec);

        md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
        if (!md->io_pool)
                goto bad_io_pool;

        md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
        if (!md->tio_pool)
                goto bad_tio_pool;

        md->bs = bioset_create(16, 0);
        if (!md->bs)
                goto bad_no_bioset;

        md->disk = alloc_disk(1);
        if (!md->disk)
                goto bad_disk;

        atomic_set(&md->pending, 0);
        init_waitqueue_head(&md->wait);
        INIT_WORK(&md->work, dm_wq_work);
        init_waitqueue_head(&md->eventq);

        md->disk->major = _major;
        md->disk->first_minor = minor;
        md->disk->fops = &dm_blk_dops;
        md->disk->queue = md->queue;
        md->disk->private_data = md;
        sprintf(md->disk->disk_name, "dm-%d", minor);
        add_disk(md->disk);
        format_dev_t(md->name, MKDEV(_major, minor));

        md->wq = create_singlethread_workqueue("kdmflush");
        if (!md->wq)
                goto bad_thread;

        /* Populate the mapping, nobody knows we exist yet */
        spin_lock(&_minor_lock);
        old_md = idr_replace(&_minor_idr, md, minor);
        spin_unlock(&_minor_lock);

        BUG_ON(old_md != MINOR_ALLOCED);

        return md;

bad_thread:
        put_disk(md->disk);
bad_disk:
        bioset_free(md->bs);
bad_no_bioset:
        mempool_destroy(md->tio_pool);
bad_tio_pool:
        mempool_destroy(md->io_pool);
bad_io_pool:
        blk_cleanup_queue(md->queue);
bad_queue:
        free_minor(minor);
bad_minor:
        module_put(THIS_MODULE);
bad_module_get:
        kfree(md);
        return NULL;
}
static void unlock_fs(struct mapped_device *md);
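/* Undo everything alloc_dev() set up, in reverse order. */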
static void free_dev(struct mapped_device *md)
{
        int minor = MINOR(disk_devt(md->disk));

        if (md->suspended_bdev) {
                unlock_fs(md);
                bdput(md->suspended_bdev);
        }
        destroy_workqueue(md->wq);
        mempool_destroy(md->tio_pool);
        mempool_destroy(md->io_pool);
        bioset_free(md->bs);
        del_gendisk(md->disk);
        free_minor(minor);

        spin_lock(&_minor_lock);
        md->disk->private_data = NULL;
        spin_unlock(&_minor_lock);

        put_disk(md->disk);
        blk_cleanup_queue(md->queue);
        module_put(THIS_MODULE);
        kfree(md);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
        unsigned long flags;
        LIST_HEAD(uevents);
        struct mapped_device *md = (struct mapped_device *) context;

        spin_lock_irqsave(&md->uevent_lock, flags);
        list_splice_init(&md->uevent_list, &uevents);
        spin_unlock_irqrestore(&md->uevent_lock, flags);

        dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

        atomic_inc(&md->event_nr);
        wake_up(&md->eventq);
}
static void __set_size(struct mapped_device *md, sector_t size)
{
        set_capacity(md->disk, size);

        mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
        i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
        mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}
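/* Install a new table as md->map and adjust the device size to match. */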
static int __bind(struct mapped_device *md, struct dm_table *t)
{
        struct request_queue *q = md->queue;
        sector_t size;

        size = dm_table_get_size(t);

        /*
         * Wipe any geometry if the size of the table changed.
         */
        if (size != get_capacity(md->disk))
                memset(&md->geometry, 0, sizeof(md->geometry));

        if (md->suspended_bdev)
                __set_size(md, size);

        if (!size) {
                dm_table_destroy(t);
                return 0;
        }

        dm_table_event_callback(t, event_callback, md);

        write_lock(&md->map_lock);
        md->map = t;
        dm_table_set_restrictions(t, q);
        write_unlock(&md->map_lock);

        return 0;
}
static void __unbind(struct mapped_device *md)
{
        struct dm_table *map = md->map;

        if (!map)
                return;

        dm_table_event_callback(map, NULL, NULL);
        write_lock(&md->map_lock);
        md->map = NULL;
        write_unlock(&md->map_lock);
        dm_table_destroy(map);
}
/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
        struct mapped_device *md;

        md = alloc_dev(minor);
        if (!md)
                return -ENXIO;

        *result = md;
        return 0;
}
static struct mapped_device *dm_find_md(dev_t dev)
{
        struct mapped_device *md;
        unsigned minor = MINOR(dev);

        if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
                return NULL;

        spin_lock(&_minor_lock);

        md = idr_find(&_minor_idr, minor);
        if (md && (md == MINOR_ALLOCED ||
                   (MINOR(disk_devt(dm_disk(md))) != minor) ||
                   test_bit(DMF_FREEING, &md->flags)))
                md = NULL;

        spin_unlock(&_minor_lock);

        return md;
}
struct mapped_device *dm_get_md(dev_t dev)
{
        struct mapped_device *md = dm_find_md(dev);

        if (md)
                dm_get(md);

        return md;
}
void *dm_get_mdptr(struct mapped_device *md)
{
        return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
        md->interface_ptr = ptr;
}
void dm_get(struct mapped_device *md)
{
        atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
        return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
void dm_put(struct mapped_device *md)
{
        struct dm_table *map;

        BUG_ON(test_bit(DMF_FREEING, &md->flags));

        if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
                map = dm_get_table(md);
                idr_replace(&_minor_idr, MINOR_ALLOCED,
                            MINOR(disk_devt(dm_disk(md))));
                set_bit(DMF_FREEING, &md->flags);
                spin_unlock(&_minor_lock);
                if (!dm_suspended(md)) {
                        dm_table_presuspend_targets(map);
                        dm_table_postsuspend_targets(map);
                }
                dm_table_put(map);
                __unbind(md);
                free_dev(md);
        }
}
EXPORT_SYMBOL_GPL(dm_put);
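/* Sleep until md->pending drops to zero, or a signal arrives if interruptible. */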
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
        int r = 0;
        DECLARE_WAITQUEUE(wait, current);

        dm_unplug_all(md->queue);

        add_wait_queue(&md->wait, &wait);

        while (1) {
                set_current_state(interruptible);

                smp_mb();
                if (!atomic_read(&md->pending))
                        break;

                if (interruptible == TASK_INTERRUPTIBLE &&
                    signal_pending(current)) {
                        r = -EINTR;
                        break;
                }

                io_schedule();
        }
        set_current_state(TASK_RUNNING);

        remove_wait_queue(&md->wait, &wait);

        return r;
}
/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
        struct mapped_device *md = container_of(work, struct mapped_device,
                                                work);
        struct bio *c;

        down_write(&md->io_lock);

        while (1) {
                spin_lock_irq(&md->deferred_lock);
                c = bio_list_pop(&md->deferred);
                spin_unlock_irq(&md->deferred_lock);

                if (!c)
                        break;

                __split_and_process_bio(md, c);
        }

        clear_bit(DMF_BLOCK_IO, &md->flags);

        up_write(&md->io_lock);
}
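/* Kick the per-device workqueue to reissue deferred bios and wait for it. */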
static void dm_queue_flush(struct mapped_device *md)
{
        queue_work(md->wq, &md->work);
        flush_workqueue(md->wq);
}
/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
        int r = -EINVAL;

        mutex_lock(&md->suspend_lock);

        /* device must be suspended */
        if (!dm_suspended(md))
                goto out;

        /* without bdev, the device size cannot be changed */
        if (!md->suspended_bdev)
                if (get_capacity(md->disk) != dm_table_get_size(table))
                        goto out;

        __unbind(md);
        r = __bind(md, table);

out:
        mutex_unlock(&md->suspend_lock);
        return r;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
        int r;

        WARN_ON(md->frozen_sb);

        md->frozen_sb = freeze_bdev(md->suspended_bdev);
        if (IS_ERR(md->frozen_sb)) {
                r = PTR_ERR(md->frozen_sb);
                md->frozen_sb = NULL;
                return r;
        }

        set_bit(DMF_FROZEN, &md->flags);

        /* don't bdput right now, we don't want the bdev
         * to go away while it is locked.
         */
        return 0;
}
static void unlock_fs(struct mapped_device *md)
{
        if (!test_bit(DMF_FROZEN, &md->flags))
                return;

        thaw_bdev(md->suspended_bdev, md->frozen_sb);
        md->frozen_sb = NULL;
        clear_bit(DMF_FROZEN, &md->flags);
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
        struct dm_table *map = NULL;
        int r = 0;
        int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
        int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

        mutex_lock(&md->suspend_lock);

        if (dm_suspended(md)) {
                r = -EINVAL;
                goto out_unlock;
        }

        map = dm_get_table(md);

        /*
         * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
         * This flag is cleared before dm_suspend returns.
         */
        if (noflush)
                set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

        /* This does not get reverted if there's an error later. */
        dm_table_presuspend_targets(map);

        /* bdget() can stall if the pending I/Os are not flushed */
        if (!noflush) {
                md->suspended_bdev = bdget_disk(md->disk, 0);
                if (!md->suspended_bdev) {
                        DMWARN("bdget failed in dm_suspend");
                        r = -ENOMEM;
                        goto out;
                }

                /*
                 * Flush I/O to the device. noflush supersedes do_lockfs,
                 * because lock_fs() needs to flush I/Os.
                 */
                if (do_lockfs) {
                        r = lock_fs(md);
                        if (r)
                                goto out;
                }
        }

        /*
         * First we set the BLOCK_IO flag so no more ios will be mapped.
         */
        down_write(&md->io_lock);
        set_bit(DMF_BLOCK_IO, &md->flags);

        up_write(&md->io_lock);

        /*
         * Wait for the already-mapped ios to complete.
         */
        r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

        down_write(&md->io_lock);

        if (noflush)
                clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
        up_write(&md->io_lock);

        /* were we interrupted ? */
        if (r < 0) {
                dm_queue_flush(md);

                unlock_fs(md);
                goto out; /* pushback list is already flushed, so skip flush */
        }

        dm_table_postsuspend_targets(map);

        set_bit(DMF_SUSPENDED, &md->flags);

out:
        if (r && md->suspended_bdev) {
                bdput(md->suspended_bdev);
                md->suspended_bdev = NULL;
        }

        dm_table_put(map);

out_unlock:
        mutex_unlock(&md->suspend_lock);
        return r;
}
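/*
 * Reverse of dm_suspend(): resume the targets, reissue any deferred
 * ios, thaw the filesystem and clear the suspended state.
 */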
int dm_resume(struct mapped_device *md)
{
        int r = -EINVAL;
        struct dm_table *map = NULL;

        mutex_lock(&md->suspend_lock);
        if (!dm_suspended(md))
                goto out;

        map = dm_get_table(md);
        if (!map || !dm_table_get_size(map))
                goto out;

        r = dm_table_resume_targets(map);
        if (r)
                goto out;

        dm_queue_flush(md);

        unlock_fs(md);

        if (md->suspended_bdev) {
                bdput(md->suspended_bdev);
                md->suspended_bdev = NULL;
        }

        clear_bit(DMF_SUSPENDED, &md->flags);

        dm_table_unplug_all(map);

        dm_kobject_uevent(md);

        r = 0;

out:
        dm_table_put(map);
        mutex_unlock(&md->suspend_lock);

        return r;
}
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
        kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
}
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
        return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
        return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
        return wait_event_interruptible(md->eventq,
                        (event_nr != atomic_read(&md->event_nr)));
}
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
        unsigned long flags;

        spin_lock_irqsave(&md->uevent_lock, flags);
        list_add(elist, &md->uevent_list);
        spin_unlock_irqrestore(&md->uevent_lock, flags);
}
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
        return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
        return &md->kobj;
}
/*
 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of md structure
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
        struct mapped_device *md;

        md = container_of(kobj, struct mapped_device, kobj);
        if (&md->kobj != kobj)
                return NULL;

        dm_get(md);
        return md;
}
int dm_suspended(struct mapped_device *md)
{
        return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
        struct mapped_device *md = dm_table_get_md(ti->table);
        int r = __noflush_suspending(md);

        dm_put(md);

        return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
static struct block_device_operations dm_blk_dops = {
        .open = dm_blk_open,
        .release = dm_blk_close,
        .ioctl = dm_blk_ioctl,
        .getgeo = dm_blk_getgeo,
        .owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");