/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"
/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};
/*
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};
/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct request *orig, clone;
};
/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
};
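/*
 * Bio-based dm stashes the per-clone struct dm_target_io in
 * bio->bi_private (see __map_bio() below), which is what allows
 * dm_get_mapinfo() to recover the union map_info for an in-flight bio.
 */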
union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6
/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;

	struct request_queue *queue;
	struct gendisk *disk;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * An error from the barrier request currently being processed.
	 */
	int barrier_error;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	wait_queue_head_t eventq;

	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* zero-length barrier that will be cloned and submitted to targets */
	struct bio barrier_bio;
};
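/*
 * These slab caches back the per-device mempools set up in alloc_dev():
 * _io_cache holds struct dm_io, _tio_cache holds struct dm_target_io,
 * and the _rq_* caches serve the request-based dm structures above.
 */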
#define MIN_IOS 256

static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;
static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}
static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);

	DMINFO("cleaned up");
}
static int (*_inits[])(void) __initdata = {
};

static void (*_exits[])(void) = {
};
static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}
/*-----------------------------------------------------------------
 * Block device functions
 *---------------------------------------------------------------*/
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}
static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	atomic_dec(&md->open_count);
	dm_put(md);

	return 0;
}
int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}
/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}
static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}
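/*
 * start_io_acct()/end_io_acct() feed the generic disk statistics for
 * dm's part0: each dm_io is timed from the jiffies stamp taken below
 * until completion, and the in_flight count is kept in step with
 * md->pending, which dm_wait_for_completion() watches during suspend.
 */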
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}
static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a barrier.
	 */
	dm_disk(md)->part0.in_flight = pending =
		atomic_dec_return(&md->pending);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}
/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
		queue_work(md->wq, &md->work);

	up_write(&md->io_lock);
}
/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
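/*
 * A minimal sketch of the reference-counting contract described above;
 * whatever is done with the table happens between the get and the put:
 *
 *	struct dm_table *t = dm_get_table(md);
 *
 *	if (t) {
 *		... inspect the table (read-only) ...
 *		dm_table_put(t);
 *	}
 */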
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}
/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}
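/*
 * Worked example of the check above (illustrative values): a geometry
 * of 16383 cylinders x 16 heads x 63 sectors gives sz = 16,514,064
 * 512-byte sectors (roughly 7.9 GiB), so any geo->start beyond that
 * offset is rejected before the geometry is stored.
 */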
/*-----------------------------------------------------------------
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md)) {
				if (!bio_barrier(io->bio))
					bio_list_add_head(&md->deferred,
							  io->bio);
			} else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

		if (bio_barrier(bio)) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct
			 */
			if (!md->barrier_error && io_error != -EOPNOTSUPP)
				md->barrier_error = io_error;
			end_io_acct(io);
		} else {
			end_io_acct(io);

			if (io_error != DM_ENDIO_REQUEUE) {
				trace_block_bio_complete(md->queue, bio);

				bio_endio(bio, io_error);
			}
		}

		free_io(md, io);
	}
}
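/*
 * clone_endio() interprets the optional per-target end_io hook: a
 * negative value or DM_ENDIO_REQUEUE is passed on to dec_pending()
 * (fail or push back the whole io), DM_ENDIO_INCOMPLETE means the
 * target still owns the bio, and any other non-zero value is a bug in
 * the target.
 */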
static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}
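/*
 * max_io_len() returns how many sectors may be mapped to this target
 * starting at 'sector' before either the end of the target or the next
 * ti->split_io boundary is reached.  The mask arithmetic below assumes
 * split_io is a power of two: (offset + split_io) & ~(split_io - 1) is
 * the next split_io boundary strictly after offset.
 */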
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;

		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}
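/*
 * __map_bio() hands one clone to the target's map function.  As the
 * comment below notes, a return of 0 means the target has taken
 * ownership and will submit the clone itself; DM_MAPIO_REMAPPED means
 * the clone was redirected and is dispatched here; a negative value or
 * DM_MAPIO_REQUEUE defers the error / push-back decision to dec_pending().
 */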
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};
static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}
/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}

	return clone;
}
/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
	}

	return clone;
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti)
{
	struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);

	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	return tio;
}
static void __flush_target(struct clone_info *ci, struct dm_target *ti,
			   unsigned flush_nr)
{
	struct dm_target_io *tio = alloc_tio(ci, ti);
	struct bio *clone;

	tio->info.flush_request = flush_nr;

	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
	__bio_clone(clone, ci->bio);
	clone->bi_destructor = dm_bio_destructor;

	__map_bio(ti, clone, tio);
}
static int __clone_and_map_empty_barrier(struct clone_info *ci)
{
	unsigned target_nr = 0, flush_nr;
	struct dm_target *ti;

	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		for (flush_nr = 0; flush_nr < ti->num_flush_requests;
		     flush_nr++)
			__flush_target(ci, ti, flush_nr);

	ci->sector_count = 0;

	return 0;
}
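/*
 * __clone_and_map() consumes one chunk of the original bio per call:
 * either the whole remainder fits in the current target (a single
 * clone), or a run of complete bvecs fits (clone_bio), or one bvec has
 * to be split across targets (split_bvec in the do/while loop below).
 */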
static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	if (unlikely(bio_empty_barrier(bio)))
		return __clone_and_map_empty_barrier(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci, ti);

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci, ti);
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}
/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map)) {
		if (!bio_barrier(bio))
			bio_io_error(bio);
		else
			if (!md->barrier_error)
				md->barrier_error = -EIO;
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	if (unlikely(bio_empty_barrier(bio)))
		ci.sector_count = 1;
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);
	/*
	 * If the target doesn't support merge method and some of the devices
	 * provided their merge_bvec method (we know this by looking at
	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
	 * entries.  So always set max_size to 0, and the code below allows
	 * just one page.
	 */
	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
		max_size = 0;

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended or the thread is processing barriers
	 * we have to queue this io for later.
	 */
	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
	    unlikely(bio_barrier(bio))) {
		up_read(&md->io_lock);

		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
		    bio_rw(bio) == READA) {
			bio_io_error(bio);
			return 0;
		}

		queue_io(md, bio);

		return 0;
	}

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}
static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_table(md);
		if (map) {
			r = dm_table_any_congested(map, bdi_bits);
			dm_table_put(map);
		}
	}

	return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}
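/*
 * Both minor allocators follow the two-step idr protocol: idr_pre_get()
 * preallocates memory outside the spinlock, then idr_get_new*() inserts
 * under _minor_lock, initially storing the MINOR_ALLOCED placeholder
 * until alloc_dev() swaps in the real mapped_device via idr_replace().
 */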
static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}
static struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->deferred_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad_io_pool;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad_tio_pool;

	md->bs = bioset_create(16, 0);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad_bdev;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_bdev:
	destroy_workqueue(md->wq);
bad_thread:
	del_gendisk(md->disk);
	put_disk(md->disk);
bad_disk:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad_tio_pool:
	mempool_destroy(md->io_pool);
bad_io_pool:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}
static void unlock_fs(struct mapped_device *md);
static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	destroy_workqueue(md->wq);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}
static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->bdev->bd_inode->i_mutex);
	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->bdev->bd_inode->i_mutex);
}
static int __bind(struct mapped_device *md, struct dm_table *t,
		  struct queue_limits *limits)
{
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	if (!size) {
		dm_table_destroy(t);
		return 0;
	}

	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q, limits);
	write_unlock(&md->map_lock);

	return 0;
}
static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_destroy(map);
}
/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}
static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}
struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}
void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}
const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		dm_table_put(map);
		__unbind(md);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	dm_unplug_all(md->queue);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		smp_mb();
		if (!atomic_read(&md->pending))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}
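/*
 * dm_flush() drains all in-flight io (md->pending reaching zero wakes
 * the waiter above), then clones a zero-length WRITE_BARRIER bio to
 * every target through __split_and_process_bio() and drains again, so
 * barrier semantics hold across the whole device.
 */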
static void dm_flush(struct mapped_device *md)
{
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);

	bio_init(&md->barrier_bio);
	md->barrier_bio.bi_bdev = md->bdev;
	md->barrier_bio.bi_rw = WRITE_BARRIER;
	__split_and_process_bio(md, &md->barrier_bio);

	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
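/*
 * process_barrier() runs from the workqueue: it flushes the device,
 * maps the barrier's payload (if any), flushes again, and then
 * completes the original bio with md->barrier_error, unless a target
 * requested DM_ENDIO_REQUEUE, in which case the bio is pushed back to
 * the head of the deferred list to be retried.
 */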
static void process_barrier(struct mapped_device *md, struct bio *bio)
{
	md->barrier_error = 0;

	dm_flush(md);

	if (!bio_empty_barrier(bio)) {
		__split_and_process_bio(md, bio);
		dm_flush(md);
	}

	if (md->barrier_error != DM_ENDIO_REQUEUE)
		bio_endio(bio, md->barrier_error);
	else {
		spin_lock_irq(&md->deferred_lock);
		bio_list_add_head(&md->deferred, bio);
		spin_unlock_irq(&md->deferred_lock);
	}
}
/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;

	down_write(&md->io_lock);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c) {
			clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
			break;
		}

		up_write(&md->io_lock);

		if (bio_barrier(c))
			process_barrier(md, c);
		else
			__split_and_process_bio(md, c);

		down_write(&md->io_lock);
	}

	up_write(&md->io_lock);
}
static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_clear_bit();
	queue_work(md->wq, &md->work);
}
/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct queue_limits limits;
	int r = -EINVAL;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	r = dm_calculate_queue_limits(table, &limits);
	if (r)
		goto out;

	__unbind(md);
	r = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return r;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}
static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device. noflush supersedes do_lockfs,
	 * because lock_fs() needs to flush I/Os.
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request, we set
	 * DMF_QUEUE_IO_TO_THREAD.
	 *
	 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
	 * and call flush_workqueue(md->wq). flush_workqueue will wait until
	 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
	 * further calls to __split_and_process_bio from dm_wq_work.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
	up_write(&md->io_lock);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now. There is no request-processing activity. All new
	 * requests are being added to md->deferred list.
	 */

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

out:
	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	unlock_fs(md);

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	r = 0;
out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
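/*
 * When a nonzero cookie is supplied it is exported in the uevent
 * environment as DM_COOKIE=<value> (see the snprintf below), which
 * udev-based tooling can use to associate the uevent with the
 * operation that triggered it.
 */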
void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		       unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
	}
}
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}
/*
 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of md structure
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags))
		return NULL;

	dm_get(md);
	return md;
}
int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}
int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");