/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
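/*
 * The cookie value is emitted as an environment variable on the resulting
 * uevent, e.g. "DM_COOKIE=1234": dm_kobject_uevent() below formats it with
 * "%s=%u" into a DM_COOKIE_LENGTH buffer.
 */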
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);

/*
 * One of these is allocated per bio.
 */
struct dm_io {
        struct mapped_device *md;
        unsigned long start_time;
        spinlock_t endio_lock;
};

/*
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
        struct mapped_device *md;
        struct request *orig, clone;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
        struct dm_rq_target_io *tio;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
        if (bio && bio->bi_private)
                return &((struct dm_target_io *)bio->bi_private)->info;
        return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
        if (rq && rq->end_io_data)
                return &((struct dm_rq_target_io *)rq->end_io_data)->info;
        return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6
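/*
 * These are bit numbers, not masks: they are manipulated with
 * set_bit()/clear_bit()/test_bit() on md->flags, e.g.
 * test_bit(DMF_FREEING, &md->flags).
 */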
/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
        struct rw_semaphore io_lock;
        struct mutex suspend_lock;

        struct request_queue *queue;
        struct gendisk *disk;

        /*
         * A list of ios that arrived while we were suspended.
         */
        wait_queue_head_t wait;
        struct work_struct work;
        struct bio_list deferred;
        spinlock_t deferred_lock;

        /*
         * An error from the barrier request currently being processed.
         */

        /*
         * Processing queue (flush/barriers)
         */
        struct workqueue_struct *wq;

        /*
         * The current mapping.
         */
        struct dm_table *map;

        /*
         * io objects are allocated from here.
         */

        wait_queue_head_t eventq;

        struct list_head uevent_list;
        spinlock_t uevent_lock; /* Protect access to uevent_list */

        /*
         * freeze/thaw support require holding onto a super block
         */
        struct super_block *frozen_sb;
        struct block_device *bdev;

        /* forced geometry settings */
        struct hd_geometry geometry;

        /* marker of flush suspend for request-based dm */
        struct request suspend_rq;

        /* For saving the address of __make_request for request based dm */
        make_request_fn *saved_make_request_fn;

        /* zero-length barrier that will be cloned and submitted to targets */
        struct bio barrier_bio;
};

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
        mempool_t *io_pool;
        mempool_t *tio_pool;
        struct bio_set *bs;
};

static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;
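/*
 * The first two caches back bio-based devices (struct dm_io and struct
 * dm_target_io); the latter two back request-based devices (struct
 * dm_rq_target_io and struct dm_rq_clone_bio_info).  dm_alloc_md_mempools()
 * below picks one pair or the other depending on the table type.
 */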
static int __init local_init(void)
{
        int r;

        /* allocate a slab for the dm_ios */
        _io_cache = KMEM_CACHE(dm_io, 0);

        /* allocate a slab for the target ios */
        _tio_cache = KMEM_CACHE(dm_target_io, 0);
        if (!_tio_cache)
                goto out_free_io_cache;

        _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
        if (!_rq_tio_cache)
                goto out_free_tio_cache;

        _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
        if (!_rq_bio_info_cache)
                goto out_free_rq_tio_cache;

        r = dm_uevent_init();
        if (r)
                goto out_free_rq_bio_info_cache;

        r = register_blkdev(_major, _name);
        if (r < 0)
                goto out_uevent_exit;

out_free_rq_bio_info_cache:
        kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
        kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
        kmem_cache_destroy(_tio_cache);
out_free_io_cache:
        kmem_cache_destroy(_io_cache);

static void local_exit(void)
{
        kmem_cache_destroy(_rq_bio_info_cache);
        kmem_cache_destroy(_rq_tio_cache);
        kmem_cache_destroy(_tio_cache);
        kmem_cache_destroy(_io_cache);
        unregister_blkdev(_major, _name);

        DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
        local_init,
};

static void (*_exits[])(void) = {
        local_exit,
};

static int __init dm_init(void)
{
        const int count = ARRAY_SIZE(_inits);
        int r, i;

        for (i = 0; i < count; i++) {
                r = _inits[i]();
                if (r)
                        goto bad;
        }

        return 0;

bad:
        while (i--)
                _exits[i]();

        return r;
}

static void __exit dm_exit(void)
{
        int i = ARRAY_SIZE(_exits);

        while (i--)
                _exits[i]();
}
/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mapped_device *md;

        spin_lock(&_minor_lock);

        md = bdev->bd_disk->private_data;

        if (test_bit(DMF_FREEING, &md->flags) ||
            test_bit(DMF_DELETING, &md->flags)) {
                md = NULL;
                goto out;
        }

        atomic_inc(&md->open_count);

out:
        spin_unlock(&_minor_lock);

        return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
        struct mapped_device *md = disk->private_data;

        atomic_dec(&md->open_count);
        dm_put(md);

        return 0;
}

int dm_open_count(struct mapped_device *md)
{
        return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
        int r = 0;

        spin_lock(&_minor_lock);

        if (dm_open_count(md))
                r = -EBUSY;
        else
                set_bit(DMF_DELETING, &md->flags);

        spin_unlock(&_minor_lock);

        return r;
}
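/*
 * Together, dm_blk_open() and dm_lock_for_deletion() close a race:
 * opening fails once DMF_DELETING (or DMF_FREEING) is set, and deletion
 * fails while open_count is non-zero.  Both are serialised by _minor_lock.
 */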
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mapped_device *md = bdev->bd_disk->private_data;

        return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        struct dm_table *map = dm_get_table(md);
        struct dm_target *tgt;
        int r = -ENOTTY;

        if (!map || !dm_table_get_size(map))
                goto out;

        /* We only support devices that have a single target */
        if (dm_table_get_num_targets(map) != 1)
                goto out;

        tgt = dm_table_get_target(map, 0);

        if (dm_suspended(md)) {
                r = -EAGAIN;
                goto out;
        }

        if (tgt->type->ioctl)
                r = tgt->type->ioctl(tgt, cmd, arg);

out:
        dm_table_put(map);

        return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
        return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
        mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
        mempool_free(tio, md->tio_pool);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md)
{
        return mempool_alloc(md->tio_pool, GFP_ATOMIC);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
        mempool_free(tio, tio->md->tio_pool);
}

static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
{
        return mempool_alloc(md->io_pool, GFP_ATOMIC);
}

static void free_bio_info(struct dm_rq_clone_bio_info *info)
{
        mempool_free(info, info->tio->md->io_pool);
}
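/*
 * The bio-based allocator uses GFP_NOIO and may sleep; the request-based
 * helpers (alloc_rq_tio(), alloc_bio_info()) use GFP_ATOMIC because they
 * run from dm_prep_fn()/clone setup with the queue lock held.
 */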
static void start_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        int cpu;

        io->start_time = jiffies;

        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}

static void end_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        struct bio *bio = io->bio;
        unsigned long duration = jiffies - io->start_time;
        int pending, cpu;
        int rw = bio_data_dir(bio);

        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);

        /*
         * After this is decremented the bio must not be touched if it is
         * a barrier.
         */
        dm_disk(md)->part0.in_flight = pending =
                atomic_dec_return(&md->pending);

        /* nudge anyone waiting on suspend queue */
        if (!pending)
                wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
        down_write(&md->io_lock);

        spin_lock_irq(&md->deferred_lock);
        bio_list_add(&md->deferred, bio);
        spin_unlock_irq(&md->deferred_lock);

        if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
                queue_work(md->wq, &md->work);

        up_write(&md->io_lock);
}
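/*
 * Once DMF_QUEUE_IO_TO_THREAD is set, incoming bios are parked on
 * md->deferred and handed to dm_wq_work() on md->wq rather than being
 * mapped directly in the caller's context.
 */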
/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
        struct dm_table *t;
        unsigned long flags;

        read_lock_irqsave(&md->map_lock, flags);
        t = md->map;
        if (t)
                dm_table_get(t);
        read_unlock_irqrestore(&md->map_lock, flags);

        return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        *geo = md->geometry;

        return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

        if (geo->start > sz) {
                DMWARN("Start sector is beyond the geometry limits.");
                return -EINVAL;
        }

        md->geometry = *geo;

        return 0;
}

/*-----------------------------------------------------------------
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
        return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static void dec_pending(struct dm_io *io, int error)
{
        unsigned long flags;
        int io_error;
        struct bio *bio = io->bio;
        struct mapped_device *md = io->md;

        /* Push-back supersedes any I/O errors */
        if (unlikely(error)) {
                spin_lock_irqsave(&io->endio_lock, flags);
                if (!(io->error > 0 && __noflush_suspending(md)))
                        io->error = error;
                spin_unlock_irqrestore(&io->endio_lock, flags);
        }

        if (atomic_dec_and_test(&io->io_count)) {
                if (io->error == DM_ENDIO_REQUEUE) {
                        /*
                         * Target requested pushing back the I/O.
                         */
                        spin_lock_irqsave(&md->deferred_lock, flags);
                        if (__noflush_suspending(md)) {
                                if (!bio_barrier(io->bio))
                                        bio_list_add_head(&md->deferred,
                                                          io->bio);
                        } else
                                /* noflush suspend was interrupted. */
                                io->error = -EIO;
                        spin_unlock_irqrestore(&md->deferred_lock, flags);
                }

                io_error = io->error;

                if (bio_barrier(bio)) {
                        /*
                         * There can be just one barrier request so we use
                         * a per-device variable for error reporting.
                         * Note that you can't touch the bio after end_io_acct
                         */
                        if (!md->barrier_error && io_error != -EOPNOTSUPP)
                                md->barrier_error = io_error;
                } else {
                        if (io_error != DM_ENDIO_REQUEUE) {
                                trace_block_bio_complete(md->queue, bio);

                                bio_endio(bio, io_error);
                        }
                }
        }
}
static void clone_endio(struct bio *bio, int error)
{
        int r = 0;
        struct dm_target_io *tio = bio->bi_private;
        struct dm_io *io = tio->io;
        struct mapped_device *md = tio->io->md;
        dm_endio_fn endio = tio->ti->type->end_io;

        if (!bio_flagged(bio, BIO_UPTODATE) && !error)
                error = -EIO;

        if (endio) {
                r = endio(tio->ti, bio, error, &tio->info);
                if (r < 0 || r == DM_ENDIO_REQUEUE)
                        /*
                         * error and requeue request are handled
                         * in dec_pending().
                         */
                        error = r;
                else if (r == DM_ENDIO_INCOMPLETE)
                        /* The target will handle the io */
                        return;
                else if (r)
                        DMWARN("unimplemented target endio return value: %d", r);
        }

        /*
         * Store md for cleanup instead of tio which is about to get freed.
         */
        bio->bi_private = md->bs;

        dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
        struct dm_rq_clone_bio_info *info = clone->bi_private;
        struct dm_rq_target_io *tio = info->tio;
        struct bio *bio = info->orig;
        unsigned int nr_bytes = info->orig->bi_size;

        /*
         * An error has already been detected on the request.
         * Once error occurred, just let clone->end_io() handle
         * the remainder.
         */

        /*
         * Don't notice the error to the upper layer yet.
         * The error handling decision is made by the target driver,
         * when the request is completed.
         */

        /*
         * I/O for the bio successfully completed.
         * Notice the data completion to the upper layer.
         */

        /*
         * bios are processed from the head of the list.
         * So the completing bio should always be rq->bio.
         * If it's not, something wrong is happening.
         */
        if (tio->orig->bio != bio)
                DMERR("bio completion is going in the middle of the request");

        /*
         * Update the original request.
         * Do not use blk_end_request() here, because it may complete
         * the original request before the clone, and break the ordering.
         */
        blk_update_request(tio->orig, 0, nr_bytes);
}
/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int run_queue)
{
        int wakeup_waiters = 0;
        struct request_queue *q = md->queue;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (!queue_in_flight(q))
                wakeup_waiters = 1;
        spin_unlock_irqrestore(q->queue_lock, flags);

        /* nudge anyone waiting on suspend queue */
        if (wakeup_waiters)
                wake_up(&md->wait);

        /*
         * dm_put() must be at the end of this function. See the comment above
         */
        dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
        struct dm_rq_target_io *tio = clone->end_io_data;

        blk_rq_unprep_clone(clone);
        free_rq_tio(tio);
}

static void dm_unprep_request(struct request *rq)
{
        struct request *clone = rq->special;

        rq->special = NULL;
        rq->cmd_flags &= ~REQ_DONTPREP;

        free_rq_clone(clone);
}

/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
{
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;
        struct request_queue *q = rq->q;
        unsigned long flags;

        dm_unprep_request(rq);

        spin_lock_irqsave(q->queue_lock, flags);
        if (elv_queue_empty(q))
                blk_plug_device(q);
        blk_requeue_request(q, rq);
        spin_unlock_irqrestore(q->queue_lock, flags);

        rq_completed(md, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
static void __stop_queue(struct request_queue *q)
{
        blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __stop_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
        if (blk_queue_stopped(q))
                blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
        struct request *rq = tio->orig;

        if (blk_pc_request(rq)) {
                rq->errors = clone->errors;
                rq->resid_len = clone->resid_len;

                /*
                 * We are using the sense buffer of the original request.
                 * So setting the length of the sense data is enough.
                 */
                rq->sense_len = clone->sense_len;
        }

        free_rq_clone(clone);

        blk_end_request_all(rq, error);

        rq_completed(md, 1);
}
/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
        struct request *clone = rq->completion_data;
        struct dm_rq_target_io *tio = clone->end_io_data;
        dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
        int error = tio->error;

        if (!(rq->cmd_flags & REQ_FAILED) && rq_end_io)
                error = rq_end_io(tio->ti, clone, error, &tio->info);

        if (error <= 0)
                /* The target wants to complete the I/O */
                dm_end_request(clone, error);
        else if (error == DM_ENDIO_INCOMPLETE)
                /* The target will handle the I/O */
                return;
        else if (error == DM_ENDIO_REQUEUE)
                /* The target wants to requeue the I/O */
                dm_requeue_unmapped_request(clone);
        else
                DMWARN("unimplemented target endio return value: %d", error);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct request *rq = tio->orig;

        tio->error = error;
        rq->completion_data = clone;
        blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct request *rq = tio->orig;

        rq->cmd_flags |= REQ_FAILED;
        dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
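/*
 * dm_dispatch_request(), dm_requeue_unmapped_request() and
 * dm_kill_unmapped_request() are exported for request-based target
 * drivers (for example dm-multipath) to dispatch, push back or fail
 * a clone from their map_rq() path.
 */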
/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
        /*
         * For just cleaning up the information of the queue in which
         * the clone was dispatched.
         * The clone is *NOT* freed actually here because it is alloced from
         * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
         */
        __blk_put_request(clone->q, clone);

        /*
         * Actual request completion is done in a softirq context which doesn't
         * hold the queue lock.  Otherwise, deadlock could occur because:
         *   - another request may be submitted by the upper level driver
         *     of the stacking during the completion
         *   - the submission which requires queue lock may be done
         *     against this queue
         */
        dm_complete_request(clone, error);
}
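/*
 * Completion path for request-based dm, as wired up below: the clone's
 * bios complete through end_clone_bio() (which only updates the original
 * request), the clone itself completes through end_clone_request(), and
 * the original request is finally finished from softirq context in
 * dm_softirq_done() via dm_end_request(), or requeued.
 */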
static sector_t max_io_len(struct mapped_device *md,
                           sector_t sector, struct dm_target *ti)
{
        sector_t offset = sector - ti->begin;
        sector_t len = ti->len - offset;

        /*
         * Does the target need to split even further ?
         */
        if (ti->split_io) {
                sector_t boundary;
                boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
                           - offset;
                if (len > boundary)
                        len = boundary;
        }

        return len;
}

static void __map_bio(struct dm_target *ti, struct bio *clone,
                      struct dm_target_io *tio)
{
        int r;
        sector_t sector;
        struct mapped_device *md;

        clone->bi_end_io = clone_endio;
        clone->bi_private = tio;

        /*
         * Map the clone.  If r == 0 we don't need to do
         * anything, the target has assumed ownership of
         * this io.
         */
        atomic_inc(&tio->io->io_count);
        sector = clone->bi_sector;
        r = ti->type->map(ti, clone, &tio->info);
        if (r == DM_MAPIO_REMAPPED) {
                /* the bio has been remapped so dispatch it */

                trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
                                  tio->io->bio->bi_bdev->bd_dev, sector);

                generic_make_request(clone);
        } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
                /* error the io and bail out, or requeue it if needed */
                md = tio->io->md;
                dec_pending(tio->io, r);
                /*
                 * Store bio_set for cleanup.
                 */
                clone->bi_private = md->bs;
        } else if (r) {
                DMWARN("unimplemented target map return value: %d", r);
        }
}
struct clone_info {
        struct mapped_device *md;
        struct dm_table *map;
        struct bio *bio;
        struct dm_io *io;
        sector_t sector;
        sector_t sector_count;
        unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
        struct bio_set *bs = bio->bi_private;

        bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
                              unsigned short idx, unsigned int offset,
                              unsigned int len, struct bio_set *bs)
{
        struct bio *clone;
        struct bio_vec *bv = bio->bi_io_vec + idx;

        clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
        clone->bi_destructor = dm_bio_destructor;
        *clone->bi_io_vec = *bv;

        clone->bi_sector = sector;
        clone->bi_bdev = bio->bi_bdev;
        clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
        clone->bi_size = to_bytes(len);
        clone->bi_io_vec->bv_offset = offset;
        clone->bi_io_vec->bv_len = clone->bi_size;
        clone->bi_flags |= 1 << BIO_CLONED;

        if (bio_integrity(bio)) {
                bio_integrity_clone(clone, bio, GFP_NOIO, bs);
                bio_integrity_trim(clone,
                                   bio_sector_offset(bio, idx, offset), len);
        }

        return clone;
}
/*
 * Creates a bio that consists of range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
                             unsigned short idx, unsigned short bv_count,
                             unsigned int len, struct bio_set *bs)
{
        struct bio *clone;

        clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
        __bio_clone(clone, bio);
        clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
        clone->bi_destructor = dm_bio_destructor;
        clone->bi_sector = sector;
        clone->bi_idx = idx;
        clone->bi_vcnt = idx + bv_count;
        clone->bi_size = to_bytes(len);
        clone->bi_flags &= ~(1 << BIO_SEG_VALID);

        if (bio_integrity(bio)) {
                bio_integrity_clone(clone, bio, GFP_NOIO, bs);

                if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
                        bio_integrity_trim(clone,
                                           bio_sector_offset(bio, idx, 0), len);
        }

        return clone;
}

static struct dm_target_io *alloc_tio(struct clone_info *ci,
                                      struct dm_target *ti)
{
        struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);

        memset(&tio->info, 0, sizeof(tio->info));

        return tio;
}
static void __flush_target(struct clone_info *ci, struct dm_target *ti,
                           unsigned flush_nr)
{
        struct dm_target_io *tio = alloc_tio(ci, ti);
        struct bio *clone;

        tio->info.flush_request = flush_nr;

        clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
        __bio_clone(clone, ci->bio);
        clone->bi_destructor = dm_bio_destructor;

        __map_bio(ti, clone, tio);
}

static int __clone_and_map_empty_barrier(struct clone_info *ci)
{
        unsigned target_nr = 0, flush_nr;
        struct dm_target *ti;

        while ((ti = dm_table_get_target(ci->map, target_nr++)))
                for (flush_nr = 0; flush_nr < ti->num_flush_requests;
                     flush_nr++)
                        __flush_target(ci, ti, flush_nr);

        ci->sector_count = 0;

        return 0;
}
static int __clone_and_map(struct clone_info *ci)
{
        struct bio *clone, *bio = ci->bio;
        struct dm_target *ti;
        sector_t len = 0, max;
        struct dm_target_io *tio;

        if (unlikely(bio_empty_barrier(bio)))
                return __clone_and_map_empty_barrier(ci);

        ti = dm_table_find_target(ci->map, ci->sector);
        if (!dm_target_is_valid(ti))
                return -EIO;

        max = max_io_len(ci->md, ci->sector, ti);

        /*
         * Allocate a target io object.
         */
        tio = alloc_tio(ci, ti);

        if (ci->sector_count <= max) {
                /*
                 * Optimise for the simple case where we can do all of
                 * the remaining io with a single clone.
                 */
                clone = clone_bio(bio, ci->sector, ci->idx,
                                  bio->bi_vcnt - ci->idx, ci->sector_count,
                                  ci->md->bs);
                __map_bio(ti, clone, tio);
                ci->sector_count = 0;

        } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
                /*
                 * There are some bvecs that don't span targets.
                 * Do as many of these as possible.
                 */
                int i;
                sector_t remaining = max;
                sector_t bv_len;

                for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
                        bv_len = to_sector(bio->bi_io_vec[i].bv_len);

                        if (bv_len > remaining)
                                break;

                        remaining -= bv_len;
                        len += bv_len;
                }

                clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
                                  ci->md->bs);
                __map_bio(ti, clone, tio);

                ci->sector += len;
                ci->sector_count -= len;
                ci->idx = i;

        } else {
                /*
                 * Handle a bvec that must be split between two or more targets.
                 */
                struct bio_vec *bv = bio->bi_io_vec + ci->idx;
                sector_t remaining = to_sector(bv->bv_len);
                unsigned int offset = 0;

                do {
                        if (offset) {
                                ti = dm_table_find_target(ci->map, ci->sector);
                                if (!dm_target_is_valid(ti))
                                        return -EIO;

                                max = max_io_len(ci->md, ci->sector, ti);

                                tio = alloc_tio(ci, ti);
                        }

                        len = min(remaining, max);

                        clone = split_bvec(bio, ci->sector, ci->idx,
                                           bv->bv_offset + offset, len,
                                           ci->md->bs);

                        __map_bio(ti, clone, tio);

                        ci->sector += len;
                        ci->sector_count -= len;
                        offset += to_bytes(len);
                } while (remaining -= len);

                ci->idx++;
        }

        return 0;
}
/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
        struct clone_info ci;
        int error = 0;

        ci.map = dm_get_table(md);
        if (unlikely(!ci.map)) {
                if (!bio_barrier(bio))
                        bio_io_error(bio);
                else if (!md->barrier_error)
                        md->barrier_error = -EIO;
                return;
        }

        ci.md = md;
        ci.bio = bio;
        ci.io = alloc_io(md);
        atomic_set(&ci.io->io_count, 1);
        spin_lock_init(&ci.io->endio_lock);
        ci.sector = bio->bi_sector;
        ci.sector_count = bio_sectors(bio);
        if (unlikely(bio_empty_barrier(bio)))
                ci.sector_count = 1;
        ci.idx = bio->bi_idx;

        start_io_acct(ci.io);
        while (ci.sector_count && !error)
                error = __clone_and_map(&ci);

        /* drop the extra reference count */
        dec_pending(ci.io, error);
        dm_table_put(ci.map);
}
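/*
 * ci.io starts with an io_count of 1; each clone mapped via __map_bio()
 * takes an extra reference, and the dec_pending() above drops the
 * original one, so the io completes only after every clone has finished.
 */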
/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/
static int dm_merge_bvec(struct request_queue *q,
                         struct bvec_merge_data *bvm,
                         struct bio_vec *biovec)
{
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);
        struct dm_target *ti;
        sector_t max_sectors;
        int max_size = 0;

        if (unlikely(!map))
                goto out;

        ti = dm_table_find_target(map, bvm->bi_sector);
        if (!dm_target_is_valid(ti))
                goto out_table;

        /*
         * Find maximum amount of I/O that won't need splitting
         */
        max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
                          (sector_t) BIO_MAX_SECTORS);
        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;

        /*
         * merge_bvec_fn() returns number of bytes
         * it can accept at this offset
         * max is precomputed maximal io size
         */
        if (max_size && ti->type->merge)
                max_size = ti->type->merge(ti, bvm, biovec, max_size);
        /*
         * If the target doesn't support merge method and some of the devices
         * provided their merge_bvec method (we know this by looking at
         * queue_max_hw_sectors), then we can't allow bios with multiple vector
         * entries.  So always set max_size to 0, and the code below allows
         * just one page.
         */
        else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
                max_size = 0;

out_table:
        dm_table_put(map);

out:
        /*
         * Always allow an entire first page
         */
        if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
                max_size = biovec->bv_len;

        return max_size;
}
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int _dm_request(struct request_queue *q, struct bio *bio)
{
        int rw = bio_data_dir(bio);
        struct mapped_device *md = q->queuedata;
        int cpu;

        down_read(&md->io_lock);

        cpu = part_stat_lock();
        part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
        part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));

        /*
         * If we're suspended or the thread is processing barriers
         * we have to queue this io for later.
         */
        if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
            unlikely(bio_barrier(bio))) {
                up_read(&md->io_lock);

                if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
                    bio_rw(bio) == READA) {
                        bio_io_error(bio);
                        return 0;
                }

                queue_io(md, bio);

                return 0;
        }

        __split_and_process_bio(md, bio);
        up_read(&md->io_lock);
        return 0;
}

static int dm_make_request(struct request_queue *q, struct bio *bio)
{
        struct mapped_device *md = q->queuedata;

        if (unlikely(bio_barrier(bio))) {
                bio_endio(bio, -EOPNOTSUPP);
                return 0;
        }

        return md->saved_make_request_fn(q, bio); /* call __make_request() */
}

static int dm_request_based(struct mapped_device *md)
{
        return blk_queue_stackable(md->queue);
}

static int dm_request(struct request_queue *q, struct bio *bio)
{
        struct mapped_device *md = q->queuedata;

        if (dm_request_based(md))
                return dm_make_request(q, bio);

        return _dm_request(q, bio);
}
void dm_dispatch_request(struct request *rq)
{
        int r;

        if (blk_queue_io_stat(rq->q))
                rq->cmd_flags |= REQ_IO_STAT;

        rq->start_time = jiffies;
        r = blk_insert_cloned_request(rq->q, rq);
        if (r)
                dm_complete_request(rq, r);
}
EXPORT_SYMBOL_GPL(dm_dispatch_request);

static void dm_rq_bio_destructor(struct bio *bio)
{
        struct dm_rq_clone_bio_info *info = bio->bi_private;
        struct mapped_device *md = info->tio->md;

        free_bio_info(info);
        bio_free(bio, md->bs);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
                                 void *data)
{
        struct dm_rq_target_io *tio = data;
        struct mapped_device *md = tio->md;
        struct dm_rq_clone_bio_info *info = alloc_bio_info(md);

        if (!info)
                return -ENOMEM;

        info->orig = bio_orig;
        info->tio = tio;
        bio->bi_end_io = end_clone_bio;
        bio->bi_private = info;
        bio->bi_destructor = dm_rq_bio_destructor;

        return 0;
}
static int setup_clone(struct request *clone, struct request *rq,
                       struct dm_rq_target_io *tio)
{
        int r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
                                  dm_rq_bio_constructor, tio);

        if (r)
                return r;

        clone->cmd = rq->cmd;
        clone->cmd_len = rq->cmd_len;
        clone->sense = rq->sense;
        clone->buffer = rq->buffer;
        clone->end_io = end_clone_request;
        clone->end_io_data = tio;

        return 0;
}

static int dm_rq_flush_suspending(struct mapped_device *md)
{
        return !md->suspend_rq.special;
}
/*
 * Called with the queue lock held.
 */
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
        struct mapped_device *md = q->queuedata;
        struct dm_rq_target_io *tio;
        struct request *clone;

        if (unlikely(rq == &md->suspend_rq)) {
                if (dm_rq_flush_suspending(md))
                        return BLKPREP_OK;
                else
                        /* The flush suspend was interrupted */
                        return BLKPREP_KILL;
        }

        if (unlikely(rq->special)) {
                DMWARN("Already has something in rq->special.");
                return BLKPREP_KILL;
        }

        tio = alloc_rq_tio(md); /* Only one for each original request */
        if (!tio)
                return BLKPREP_DEFER;

        memset(&tio->info, 0, sizeof(tio->info));

        clone = &tio->clone;
        if (setup_clone(clone, rq, tio)) {
                free_rq_tio(tio);
                return BLKPREP_DEFER;
        }

        rq->special = clone;
        rq->cmd_flags |= REQ_DONTPREP;

        return BLKPREP_OK;
}

static void map_request(struct dm_target *ti, struct request *rq,
                        struct mapped_device *md)
{
        int r;
        struct request *clone = rq->special;
        struct dm_rq_target_io *tio = clone->end_io_data;

        /*
         * Hold the md reference here for the in-flight I/O.
         * We can't rely on the reference count by device opener,
         * because the device may be closed during the request completion
         * when all bios are completed.
         * See the comment in rq_completed() too.
         */
        dm_get(md);

        r = ti->type->map_rq(ti, clone, &tio->info);
        switch (r) {
        case DM_MAPIO_SUBMITTED:
                /* The target has taken the I/O to submit by itself later */
                break;
        case DM_MAPIO_REMAPPED:
                /* The target has remapped the I/O so dispatch it */
                dm_dispatch_request(clone);
                break;
        case DM_MAPIO_REQUEUE:
                /* The target wants to requeue the I/O */
                dm_requeue_unmapped_request(clone);
                break;
        default:
                DMWARN("unimplemented target map return value: %d", r);

                /* The target wants to complete the I/O */
                dm_kill_unmapped_request(clone, r);
                break;
        }
}
/*
 * q->request_fn for request-based dm.
 * Called with the queue lock held.
 */
static void dm_request_fn(struct request_queue *q)
{
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);
        struct dm_target *ti;
        struct request *rq;

        /*
         * For noflush suspend, check blk_queue_stopped() to immediately
         * quit I/O dispatching.
         */
        while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
                rq = blk_peek_request(q);
                if (!rq)
                        goto plug_and_out;

                if (unlikely(rq == &md->suspend_rq)) { /* Flush suspend marker */
                        if (queue_in_flight(q))
                                /* Not quiet yet.  Wait more */
                                goto plug_and_out;

                        /* This device should be quiet now */
                        __stop_queue(q);
                        blk_start_request(rq);
                        __blk_end_request_all(rq, 0);
                        wake_up(&md->wait);
                        goto out;
                }

                ti = dm_table_find_target(map, blk_rq_pos(rq));
                if (ti->type->busy && ti->type->busy(ti))
                        goto plug_and_out;

                blk_start_request(rq);
                spin_unlock(q->queue_lock);
                map_request(ti, rq, md);
                spin_lock_irq(q->queue_lock);
        }

        goto out;

plug_and_out:
        if (!elv_queue_empty(q))
                /* Some requests still remain, retry later */
                blk_plug_device(q);

out:
        dm_table_put(map);
}

int dm_underlying_device_busy(struct request_queue *q)
{
        return blk_lld_busy(q);
}
EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
static int dm_lld_busy(struct request_queue *q)
{
        int r;
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);

        if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
                r = 1;
        else
                r = dm_table_any_busy_target(map);

        dm_table_put(map);

        return r;
}

static void dm_unplug_all(struct request_queue *q)
{
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);

        if (map) {
                if (dm_request_based(md))
                        generic_unplug_device(q);
                else
                        dm_table_unplug_all(map);
                dm_table_put(map);
        }
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
        int r = bdi_bits;
        struct mapped_device *md = congested_data;
        struct dm_table *map;

        if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
                map = dm_get_table(md);
                if (map) {
                        /*
                         * Request-based dm cares about only own queue for
                         * the query about congestion status of request_queue
                         */
                        if (dm_request_based(md))
                                r = md->queue->backing_dev_info.state &
                                    bdi_bits;
                        else
                                r = dm_table_any_congested(map, bdi_bits);

                        dm_table_put(map);
                }
        }

        return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
        spin_lock(&_minor_lock);
        idr_remove(&_minor_idr, minor);
        spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
        int r, m;

        if (minor >= (1 << MINORBITS))
                return -EINVAL;

        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
        if (!r)
                return -ENOMEM;

        spin_lock(&_minor_lock);

        if (idr_find(&_minor_idr, minor)) {
                r = -EBUSY;
                goto out;
        }

        r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
        if (r)
                goto out;

        if (m != minor) {
                idr_remove(&_minor_idr, m);
                r = -EBUSY;
        }

out:
        spin_unlock(&_minor_lock);
        return r;
}

static int next_free_minor(int *minor)
{
        int r, m;

        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
        if (!r)
                return -ENOMEM;

        spin_lock(&_minor_lock);

        r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
        if (r)
                goto out;

        if (m >= (1 << MINORBITS)) {
                idr_remove(&_minor_idr, m);
                r = -ENOSPC;
                goto out;
        }

        *minor = m;

out:
        spin_unlock(&_minor_lock);
        return r;
}

static struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
        int r;
        struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
        void *old_md;

        if (!md) {
                DMWARN("unable to allocate device, out of memory.");
                return NULL;
        }

        if (!try_module_get(THIS_MODULE))
                goto bad_module_get;

        /* get a minor number for the dev */
        if (minor == DM_ANY_MINOR)
                r = next_free_minor(&minor);
        else
                r = specific_minor(minor);
        if (r < 0)
                goto bad_minor;

        init_rwsem(&md->io_lock);
        mutex_init(&md->suspend_lock);
        spin_lock_init(&md->deferred_lock);
        rwlock_init(&md->map_lock);
        atomic_set(&md->holders, 1);
        atomic_set(&md->open_count, 0);
        atomic_set(&md->event_nr, 0);
        atomic_set(&md->uevent_seq, 0);
        INIT_LIST_HEAD(&md->uevent_list);
        spin_lock_init(&md->uevent_lock);

        md->queue = blk_init_queue(dm_request_fn, NULL);
        if (!md->queue)
                goto bad_queue;

        /*
         * Request-based dm devices cannot be stacked on top of bio-based dm
         * devices.  The type of this dm device has not been decided yet,
         * although we initialized the queue using blk_init_queue().
         * The type is decided at the first table loading time.
         * To prevent problematic device stacking, clear the queue flag
         * for request stacking support until then.
         *
         * This queue is new, so no concurrency on the queue_flags.
         */
        queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
        md->saved_make_request_fn = md->queue->make_request_fn;
        md->queue->queuedata = md;
        md->queue->backing_dev_info.congested_fn = dm_any_congested;
        md->queue->backing_dev_info.congested_data = md;
        blk_queue_make_request(md->queue, dm_request);
        blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
        md->queue->unplug_fn = dm_unplug_all;
        blk_queue_merge_bvec(md->queue, dm_merge_bvec);
        blk_queue_softirq_done(md->queue, dm_softirq_done);
        blk_queue_prep_rq(md->queue, dm_prep_fn);
        blk_queue_lld_busy(md->queue, dm_lld_busy);

        md->disk = alloc_disk(1);
        if (!md->disk)
                goto bad_disk;

        atomic_set(&md->pending, 0);
        init_waitqueue_head(&md->wait);
        INIT_WORK(&md->work, dm_wq_work);
        init_waitqueue_head(&md->eventq);

        md->disk->major = _major;
        md->disk->first_minor = minor;
        md->disk->fops = &dm_blk_dops;
        md->disk->queue = md->queue;
        md->disk->private_data = md;
        sprintf(md->disk->disk_name, "dm-%d", minor);
        format_dev_t(md->name, MKDEV(_major, minor));

        md->wq = create_singlethread_workqueue("kdmflush");
        if (!md->wq)
                goto bad_thread;

        md->bdev = bdget_disk(md->disk, 0);
        if (!md->bdev)
                goto bad_bdev;

        /* Populate the mapping, nobody knows we exist yet */
        spin_lock(&_minor_lock);
        old_md = idr_replace(&_minor_idr, md, minor);
        spin_unlock(&_minor_lock);

        BUG_ON(old_md != MINOR_ALLOCED);

        return md;
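/*
 * While the device is being set up, the minor is parked in the IDR as the
 * MINOR_ALLOCED placeholder (see specific_minor()/next_free_minor()); the
 * idr_replace() above swaps in the real md only once it is fully
 * initialised, which is why old_md must still be MINOR_ALLOCED here.
 */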
bad_bdev:
        destroy_workqueue(md->wq);
bad_thread:
        del_gendisk(md->disk);
        put_disk(md->disk);
bad_disk:
        blk_cleanup_queue(md->queue);
bad_queue:
        free_minor(minor);
bad_minor:
        module_put(THIS_MODULE);
bad_module_get:
        kfree(md);
        return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
        int minor = MINOR(disk_devt(md->disk));

        destroy_workqueue(md->wq);
        if (md->tio_pool)
                mempool_destroy(md->tio_pool);
        if (md->io_pool)
                mempool_destroy(md->io_pool);
        if (md->bs)
                bioset_free(md->bs);
        blk_integrity_unregister(md->disk);
        del_gendisk(md->disk);
        free_minor(minor);

        spin_lock(&_minor_lock);
        md->disk->private_data = NULL;
        spin_unlock(&_minor_lock);

        put_disk(md->disk);
        blk_cleanup_queue(md->queue);
        module_put(THIS_MODULE);
        kfree(md);
}
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
        struct dm_md_mempools *p;

        if (md->io_pool && md->tio_pool && md->bs)
                /* the md already has necessary mempools */
                goto out;

        p = dm_table_get_md_mempools(t);
        BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);

        md->io_pool = p->io_pool;
        md->tio_pool = p->tio_pool;
        md->bs = p->bs;

out:
        /* mempool bind completed, now no need any mempools in the table */
        dm_table_free_md_mempools(t);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
        unsigned long flags;
        LIST_HEAD(uevents);
        struct mapped_device *md = (struct mapped_device *) context;

        spin_lock_irqsave(&md->uevent_lock, flags);
        list_splice_init(&md->uevent_list, &uevents);
        spin_unlock_irqrestore(&md->uevent_lock, flags);

        dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

        atomic_inc(&md->event_nr);
        wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
        set_capacity(md->disk, size);

        mutex_lock(&md->bdev->bd_inode->i_mutex);
        i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
        mutex_unlock(&md->bdev->bd_inode->i_mutex);
}
static int __bind(struct mapped_device *md, struct dm_table *t,
                  struct queue_limits *limits)
{
        struct request_queue *q = md->queue;
        sector_t size;
        unsigned long flags;

        size = dm_table_get_size(t);

        /*
         * Wipe any geometry if the size of the table changed.
         */
        if (size != get_capacity(md->disk))
                memset(&md->geometry, 0, sizeof(md->geometry));

        __set_size(md, size);

        if (!size) {
                dm_table_destroy(t);
                return 0;
        }

        dm_table_event_callback(t, event_callback, md);

        /*
         * The queue hasn't been stopped yet, if the old table type wasn't
         * for request-based during suspension.  So stop it to prevent
         * I/O mapping before resume.
         * This must be done before setting the queue restrictions,
         * because request-based dm may be run just after the setting.
         */
        if (dm_table_request_based(t) && !blk_queue_stopped(q))
                stop_queue(q);

        __bind_mempools(md, t);

        write_lock_irqsave(&md->map_lock, flags);
        md->map = t;
        dm_table_set_restrictions(t, q, limits);
        write_unlock_irqrestore(&md->map_lock, flags);

        return 0;
}

static void __unbind(struct mapped_device *md)
{
        struct dm_table *map = md->map;
        unsigned long flags;

        if (!map)
                return;

        dm_table_event_callback(map, NULL, NULL);
        write_lock_irqsave(&md->map_lock, flags);
        md->map = NULL;
        write_unlock_irqrestore(&md->map_lock, flags);
        dm_table_destroy(map);
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
        struct mapped_device *md;

        md = alloc_dev(minor);
        if (!md)
                return -ENXIO;

        *result = md;
        return 0;
}
static struct mapped_device *dm_find_md(dev_t dev)
{
        struct mapped_device *md;
        unsigned minor = MINOR(dev);

        if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
                return NULL;

        spin_lock(&_minor_lock);

        md = idr_find(&_minor_idr, minor);
        if (md && (md == MINOR_ALLOCED ||
                   (MINOR(disk_devt(dm_disk(md))) != minor) ||
                   test_bit(DMF_FREEING, &md->flags))) {
                md = NULL;
                goto out;
        }

out:
        spin_unlock(&_minor_lock);

        return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
        struct mapped_device *md = dm_find_md(dev);

        if (md)
                dm_get(md);

        return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
        return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
        md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
        atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
        return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
{
        struct dm_table *map;

        BUG_ON(test_bit(DMF_FREEING, &md->flags));

        if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
                map = dm_get_table(md);
                idr_replace(&_minor_idr, MINOR_ALLOCED,
                            MINOR(disk_devt(dm_disk(md))));
                set_bit(DMF_FREEING, &md->flags);
                spin_unlock(&_minor_lock);
                if (!dm_suspended(md)) {
                        dm_table_presuspend_targets(map);
                        dm_table_postsuspend_targets(map);
                }
                dm_table_put(map);
                __unbind(md);
                free_dev(md);
        }
}
EXPORT_SYMBOL_GPL(dm_put);
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
        int r = 0;
        DECLARE_WAITQUEUE(wait, current);
        struct request_queue *q = md->queue;
        unsigned long flags;

        dm_unplug_all(md->queue);

        add_wait_queue(&md->wait, &wait);

        while (1) {
                set_current_state(interruptible);

                if (dm_request_based(md)) {
                        spin_lock_irqsave(q->queue_lock, flags);
                        if (!queue_in_flight(q) && blk_queue_stopped(q)) {
                                spin_unlock_irqrestore(q->queue_lock, flags);
                                break;
                        }
                        spin_unlock_irqrestore(q->queue_lock, flags);
                } else if (!atomic_read(&md->pending))
                        break;

                if (interruptible == TASK_INTERRUPTIBLE &&
                    signal_pending(current)) {
                        r = -EINTR;
                        break;
                }

                io_schedule();
        }
        set_current_state(TASK_RUNNING);

        remove_wait_queue(&md->wait, &wait);

        return r;
}

static void dm_flush(struct mapped_device *md)
{
        dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);

        bio_init(&md->barrier_bio);
        md->barrier_bio.bi_bdev = md->bdev;
        md->barrier_bio.bi_rw = WRITE_BARRIER;
        __split_and_process_bio(md, &md->barrier_bio);

        dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
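/*
 * dm_flush() drains all in-flight io, then submits md->barrier_bio (an
 * empty WRITE_BARRIER bio embedded in the mapped_device) through the
 * normal __split_and_process_bio() path so every target sees the flush,
 * and waits again for that to complete.
 */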
static void process_barrier(struct mapped_device *md, struct bio *bio)
{
        md->barrier_error = 0;

        dm_flush(md);

        if (!bio_empty_barrier(bio)) {
                __split_and_process_bio(md, bio);
                dm_flush(md);
        }

        if (md->barrier_error != DM_ENDIO_REQUEUE)
                bio_endio(bio, md->barrier_error);
        else {
                spin_lock_irq(&md->deferred_lock);
                bio_list_add_head(&md->deferred, bio);
                spin_unlock_irq(&md->deferred_lock);
        }
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
        struct mapped_device *md = container_of(work, struct mapped_device,
                                                work);
        struct bio *c;

        down_write(&md->io_lock);

        while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
                spin_lock_irq(&md->deferred_lock);
                c = bio_list_pop(&md->deferred);
                spin_unlock_irq(&md->deferred_lock);

                if (!c) {
                        clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
                        break;
                }

                up_write(&md->io_lock);

                if (dm_request_based(md))
                        generic_make_request(c);
                else {
                        if (bio_barrier(c))
                                process_barrier(md, c);
                        else
                                __split_and_process_bio(md, c);
                }

                down_write(&md->io_lock);
        }

        up_write(&md->io_lock);
}

static void dm_queue_flush(struct mapped_device *md)
{
        clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
        smp_mb__after_clear_bit();
        queue_work(md->wq, &md->work);
}
/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
        struct queue_limits limits;
        int r = -EINVAL;

        mutex_lock(&md->suspend_lock);

        /* device must be suspended */
        if (!dm_suspended(md))
                goto out;

        r = dm_calculate_queue_limits(table, &limits);
        if (r)
                goto out;

        /* cannot change the device type, once a table is bound */
        if (md->map &&
            (dm_table_get_type(md->map) != dm_table_get_type(table))) {
                DMWARN("can't change the device type after a table is bound");
                goto out;
        }

        r = __bind(md, table, &limits);

out:
        mutex_unlock(&md->suspend_lock);
        return r;
}

static void dm_rq_invalidate_suspend_marker(struct mapped_device *md)
{
        md->suspend_rq.special = (void *)0x1;
}

static void dm_rq_abort_suspend(struct mapped_device *md, int noflush)
{
        struct request_queue *q = md->queue;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        if (!noflush)
                dm_rq_invalidate_suspend_marker(md);
        __start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_rq_start_suspend(struct mapped_device *md, int noflush)
{
        struct request *rq = &md->suspend_rq;
        struct request_queue *q = md->queue;

        if (noflush)
                stop_queue(q);
        else
                blk_insert_request(q, rq, 0, NULL);
}

static int dm_rq_suspend_available(struct mapped_device *md, int noflush)
{
        int r = 1;
        struct request *rq = &md->suspend_rq;
        struct request_queue *q = md->queue;
        unsigned long flags;

        if (noflush)
                return r;

        /* The marker must be protected by queue lock if it is in use */
        spin_lock_irqsave(q->queue_lock, flags);
        if (unlikely(rq->ref_count)) {
                /*
                 * This can happen, when the previous flush suspend was
                 * interrupted, the marker is still in the queue and
                 * this flush suspend has been invoked, because we don't
                 * remove the marker at the time of suspend interruption.
                 * We have only one marker per mapped_device, so we can't
                 * start another flush suspend while it is in use.
                 */
                BUG_ON(!rq->special); /* The marker should be invalidated */
                DMWARN("Invalidating the previous flush suspend is still in"
                       " progress.  Please retry later.");
                r = 0;
        }
        spin_unlock_irqrestore(q->queue_lock, flags);

        return r;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
        int r;

        WARN_ON(md->frozen_sb);

        md->frozen_sb = freeze_bdev(md->bdev);
        if (IS_ERR(md->frozen_sb)) {
                r = PTR_ERR(md->frozen_sb);
                md->frozen_sb = NULL;
                return r;
        }

        set_bit(DMF_FROZEN, &md->flags);

        return 0;
}

static void unlock_fs(struct mapped_device *md)
{
        if (!test_bit(DMF_FROZEN, &md->flags))
                return;

        thaw_bdev(md->bdev, md->frozen_sb);
        md->frozen_sb = NULL;
        clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */

/*
 * Suspend mechanism in request-based dm.
 *
 * After the suspend starts, further incoming requests are kept in
 * the request_queue and deferred.
 * Remaining requests in the request_queue at the start of suspend are flushed
 * if it is flush suspend.
 * The suspend completes when the following conditions have been satisfied,
 * so wait for it:
 *    1. q->in_flight is 0 (which means no in_flight request)
 *    2. queue has been stopped (which means no request dispatching)
 *
 * Noflush suspend doesn't need to dispatch remaining requests.
 * So stop the queue immediately.  Then, wait for all in_flight requests
 * to be completed or requeued.
 *
 * To abort noflush suspend, start the queue.
 *
 * Flush suspend needs to dispatch remaining requests.  So stop the queue
 * after the remaining requests are completed. (Requeued requests must also be
 * re-dispatched and completed.  Until then, we can't stop the queue.)
 *
 * During flushing the remaining requests, further incoming requests are also
 * inserted to the same queue.  To distinguish which requests are to be
 * flushed, we insert a marker request to the queue at the time of starting
 * flush suspend, like a barrier.
 * The dispatching is blocked when the marker is found on the top of the queue.
 * And the queue is stopped when all in_flight requests are completed, since
 * that means the remaining requests are completely flushed.
 * Then, the marker is removed from the queue.
 *
 * To abort flush suspend, we also need to take care of the marker, not only
 * starting the queue.
 * We don't remove the marker forcibly from the queue since it's against
 * the block-layer manner.  Instead, we put an invalidated mark on the marker.
 * When the invalidated marker is found on the top of the queue, it is
 * immediately removed from the queue, so it doesn't block dispatching.
 * Because we have only one marker per mapped_device, we can't start another
 * flush suspend until the invalidated marker is removed from the queue.
 * So fail and return with -EBUSY in such a case.
 */
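/*
 * Callers select the behaviour described above through suspend_flags:
 * DM_SUSPEND_LOCKFS_FLAG asks dm_suspend() to freeze the filesystem via
 * lock_fs(), and DM_SUSPEND_NOFLUSH_FLAG selects noflush suspend (noflush
 * supersedes lockfs, since lock_fs() would have to flush I/O).
 */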
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
        int r = 0;
        struct dm_table *map = NULL;
        int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
        int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

        mutex_lock(&md->suspend_lock);

        if (dm_suspended(md)) {
                r = -EINVAL;
                goto out_unlock;
        }

        if (dm_request_based(md) && !dm_rq_suspend_available(md, noflush)) {
                r = -EBUSY;
                goto out_unlock;
        }

        map = dm_get_table(md);

        /*
         * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
         * This flag is cleared before dm_suspend returns.
         */
        if (noflush)
                set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

        /* This does not get reverted if there's an error later. */
        dm_table_presuspend_targets(map);

        /*
         * Flush I/O to the device. noflush supersedes do_lockfs,
         * because lock_fs() needs to flush I/Os.
         */
        if (!noflush && do_lockfs) {
                r = lock_fs(md);
                if (r)
                        goto out;
        }

        /*
         * Here we must make sure that no processes are submitting requests
         * to target drivers i.e. no one may be executing
         * __split_and_process_bio. This is called from dm_request and
         * dm_wq_work.
         *
         * To get all processes out of __split_and_process_bio in dm_request,
         * we take the write lock. To prevent any process from reentering
         * __split_and_process_bio from dm_request, we set
         * DMF_QUEUE_IO_TO_THREAD.
         *
         * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
         * and call flush_workqueue(md->wq). flush_workqueue will wait until
         * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
         * further calls to __split_and_process_bio from dm_wq_work.
         */
        down_write(&md->io_lock);
        set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
        set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
        up_write(&md->io_lock);

        flush_workqueue(md->wq);

        if (dm_request_based(md))
                dm_rq_start_suspend(md, noflush);

        /*
         * At this point no more requests are entering target request routines.
         * We call dm_wait_for_completion to wait for all existing requests
         * to finish.
         */
        r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

        down_write(&md->io_lock);
        if (noflush)
                clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
        up_write(&md->io_lock);

        /* were we interrupted ? */
        if (r < 0) {
                dm_queue_flush(md);

                if (dm_request_based(md))
                        dm_rq_abort_suspend(md, noflush);

                unlock_fs(md);
                goto out; /* pushback list is already flushed, so skip flush */
        }

        /*
         * If dm_wait_for_completion returned 0, the device is completely
         * quiescent now. There is no request-processing activity. All new
         * requests are being added to md->deferred list.
         */

        dm_table_postsuspend_targets(map);

        set_bit(DMF_SUSPENDED, &md->flags);

out:
        dm_table_put(map);

out_unlock:
        mutex_unlock(&md->suspend_lock);
        return r;
}

int dm_resume(struct mapped_device *md)
{
        int r = -EINVAL;
        struct dm_table *map = NULL;

        mutex_lock(&md->suspend_lock);
        if (!dm_suspended(md))
                goto out;

        map = dm_get_table(md);
        if (!map || !dm_table_get_size(map))
                goto out;

        r = dm_table_resume_targets(map);
        if (r)
                goto out;

        dm_queue_flush(md);

        /*
         * Flushing deferred I/Os must be done after targets are resumed
         * so that mapping of targets can work correctly.
         * Request-based dm is queueing the deferred I/Os in its request_queue.
         */
        if (dm_request_based(md))
                start_queue(md->queue);

        unlock_fs(md);

        clear_bit(DMF_SUSPENDED, &md->flags);

        dm_table_unplug_all(map);
        r = 0;

out:
        dm_table_put(map);
        mutex_unlock(&md->suspend_lock);

        return r;
}
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
                       unsigned cookie)
{
        char udev_cookie[DM_COOKIE_LENGTH];
        char *envp[] = { udev_cookie, NULL };

        if (!cookie)
                kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
        else {
                snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
                         DM_COOKIE_ENV_VAR_NAME, cookie);
                kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
        }
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
        return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
        return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
        return wait_event_interruptible(md->eventq,
                        (event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
        unsigned long flags;

        spin_lock_irqsave(&md->uevent_lock, flags);
        list_add(elist, &md->uevent_list);
        spin_unlock_irqrestore(&md->uevent_lock, flags);
}
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
        return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
        return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of md structure
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
        struct mapped_device *md;

        md = container_of(kobj, struct mapped_device, kobj);
        if (&md->kobj != kobj)
                return NULL;

        if (test_bit(DMF_FREEING, &md->flags) ||
            test_bit(DMF_DELETING, &md->flags))
                return NULL;

        dm_get(md);
        return md;
}

int dm_suspended(struct mapped_device *md)
{
        return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
        struct mapped_device *md = dm_table_get_md(ti->table);
        int r = __noflush_suspending(md);

        dm_put(md);

        return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
{
        struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);

        if (!pools)
                return NULL;

        pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
                         mempool_create_slab_pool(MIN_IOS, _io_cache) :
                         mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
        if (!pools->io_pool)
                goto free_pools_and_out;

        pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
                          mempool_create_slab_pool(MIN_IOS, _tio_cache) :
                          mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
        if (!pools->tio_pool)
                goto free_io_pool_and_out;

        pools->bs = (type == DM_TYPE_BIO_BASED) ?
                    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
        if (!pools->bs)
                goto free_tio_pool_and_out;

        return pools;

free_tio_pool_and_out:
        mempool_destroy(pools->tio_pool);

free_io_pool_and_out:
        mempool_destroy(pools->io_pool);

free_pools_and_out:
        kfree(pools);

        return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
        if (!pools)
                return;

        if (pools->io_pool)
                mempool_destroy(pools->io_pool);

        if (pools->tio_pool)
                mempool_destroy(pools->tio_pool);

        if (pools->bs)
                bioset_free(pools->bs);

        kfree(pools);
}
static struct block_device_operations dm_blk_dops = {
        .open = dm_blk_open,
        .release = dm_blk_close,
        .ioctl = dm_blk_ioctl,
        .getgeo = dm_blk_getgeo,
        .owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");