 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

 * ratelimit state to be used in DMXXX_LIMIT().
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
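/*
 * dm_kobject_uevent() below formats the cookie as "DM_COOKIE=<value>"
 * in the uevent environment, so userspace (e.g. udev rules) can match
 * the uevent with the ioctl that triggered it.
 */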
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

 * One of these is allocated per bio.
	struct mapped_device *md;
	unsigned long start_time;
	spinlock_t endio_lock;

 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.

 * For request-based dm.
 * One of these is allocated per request.
struct dm_rq_target_io {
	struct mapped_device *md;
	struct request *orig, clone;

 * For request-based dm - the bio clones we allocate are embedded in these
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
struct dm_rq_clone_bio_info {
	struct dm_rq_target_io *tio;

union map_info *dm_get_mapinfo(struct bio *bio)
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;

union map_info *dm_get_rq_mapinfo(struct request *rq)
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
#define MINOR_ALLOCED ((void *)-1)
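/*
 * MINOR_ALLOCED is a placeholder stored in _minor_idr while a minor
 * number is reserved but the mapped_device is still being set up;
 * alloc_dev() swaps in the real pointer with idr_replace() once the
 * device is ready.
 */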
 * Bits for the md->flags field.
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_MERGE_IS_OPTIONAL 6

 * Work processed by per-device workqueue.
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;

	struct request_queue *queue;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct target_type *immutable_target_type;

	struct gendisk *disk;

	 * A list of ios that arrived while we were suspended.
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	 * Processing queue (flush)
	struct workqueue_struct *wq;

	 * The current mapping.
	struct dm_table *map;

	 * io objects are allocated from here.

	wait_queue_head_t eventq;

	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	 * freeze/thaw support require holding onto a super block
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

 * For mempools pre-allocation at the table loading time.
struct dm_md_mempools {

static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;

 * Unused now, and needs to be deleted. But since io_pool is overloaded and it's
 * still used for _io_cache, I'm leaving this for a later cleanup
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
		goto out_free_io_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
		goto out_free_rq_bio_info_cache;

	r = register_blkdev(_major, _name);
		goto out_uevent_exit;

out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);

static void local_exit(void)
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);

	DMINFO("cleaned up");
static int (*_inits[])(void) __initdata = {

static void (*_exits[])(void) = {

static int __init dm_init(void)
	const int count = ARRAY_SIZE(_inits);

	for (i = 0; i < count; i++) {

static void __exit dm_exit(void)
	int i = ARRAY_SIZE(_exits);

	 * Should be empty by this point.
	idr_remove_all(&_minor_idr);
	idr_destroy(&_minor_idr);

 * Block device functions
int dm_deleting_md(struct mapped_device *md)
	return test_bit(DMF_DELETING, &md->flags);

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {

	atomic_inc(&md->open_count);

	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
	struct mapped_device *md = disk->private_data;

	spin_lock(&_minor_lock);

	atomic_dec(&md->open_count);

	spin_unlock(&_minor_lock);

int dm_open_count(struct mapped_device *md)
	return atomic_read(&md->open_count);

 * Guarantees nothing is using the device before it's deleted.
int dm_lock_for_deletion(struct mapped_device *md)
	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *tgt;

	if (!map || !dm_table_get_size(map))

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended_md(md)) {

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

static struct dm_io *alloc_io(struct mapped_device *md)
	return mempool_alloc(md->io_pool, GFP_NOIO);

static void free_io(struct mapped_device *md, struct dm_io *io)
	mempool_free(io, md->io_pool);

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
	bio_put(&tio->clone);

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
	return mempool_alloc(md->tio_pool, gfp_mask);

static void free_rq_tio(struct dm_rq_target_io *tio)
	mempool_free(tio, tio->md->tio_pool);

static int md_in_flight(struct mapped_device *md)
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
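/*
 * md_in_flight() sums the READ and WRITE in-flight counters that
 * start_io_acct()/end_io_acct() and rq_completed() maintain in
 * md->pending[]; dm_wait_for_completion() below polls it while
 * suspending.
 */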
static void start_io_acct(struct dm_io *io)
	struct mapped_device *md = io->md;
	int rw = bio_data_dir(io->bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		atomic_inc_return(&md->pending[rw]));

static void end_io_acct(struct dm_io *io)
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);

	 * After this is decremented the bio must not be touched if it is
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */

 * Add the bio to the list of deferred io.
static void queue_io(struct mapped_device *md, struct bio *bio)
	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);

 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
struct dm_table *dm_get_live_table(struct mapped_device *md)
	read_lock_irqsave(&md->map_lock, flags);
	read_unlock_irqrestore(&md->map_lock, flags);

 * Get the geometry associated with a dm device
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)

 * Set the geometry of a device.
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");

/*-----------------------------------------------------------------
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/
static int __noflush_suspending(struct mapped_device *md)
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
static void dec_pending(struct dm_io *io, int error)
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
		spin_unlock_irqrestore(&io->endio_lock, flags);

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			 * Target requested pushing back the I/O.
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
				/* noflush suspend was interrupted. */
			spin_unlock_irqrestore(&md->deferred_lock, flags);

		io_error = io->error;

		if (io_error == DM_ENDIO_REQUEUE)

		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
			 * Preflush done for flush with data, reissue
			bio->bi_rw &= ~REQ_FLUSH;
			/* done with normal IO or empty flush */
			trace_block_bio_complete(md->queue, bio, io_error);
			bio_endio(bio, io_error);

static void clone_endio(struct bio *bio, int error)
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)

		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			 * error and requeue request are handled
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			DMWARN("unimplemented target endio return value: %d", r);

	dec_pending(io, error);

 * Partial completion handling for request-based dm
static void end_clone_bio(struct bio *clone, int error)
	struct dm_rq_clone_bio_info *info = clone->bi_private;
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_size;

		 * An error has already been detected on the request.
		 * Once error occurred, just let clone->end_io() handle
		 * Don't notice the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
	 * I/O for the bio successfully completed.
	 * Notice the data completion to the upper layer.
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	blk_update_request(tio->orig, 0, nr_bytes);
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))

		blk_run_queue(md->queue);

	 * dm_put() must be at the end of this function. See the comment above

static void free_rq_clone(struct request *clone)
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);

 * Complete the clone and the original request.
 * Must be called without queue lock.
static void dm_end_request(struct request *clone, int error)
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

			 * We are using the sense buffer of the original
			 * So setting the length of the sense data is enough.
			rq->sense_len = clone->sense_len;

	free_rq_clone(clone);
	blk_end_request_all(rq, error);
	rq_completed(md, rw, true);

static void dm_unprep_request(struct request *rq)
	struct request *clone = rq->special;

	rq->cmd_flags &= ~REQ_DONTPREP;

	free_rq_clone(clone);

 * Requeue the original request of a clone.
void dm_requeue_unmapped_request(struct request *clone)
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, rw, 0);
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);

static void __stop_queue(struct request_queue *q)

static void stop_queue(struct request_queue *q)
	spin_lock_irqsave(q->queue_lock, flags);
	spin_unlock_irqrestore(q->queue_lock, flags);

static void __start_queue(struct request_queue *q)
	if (blk_queue_stopped(q))

static void start_queue(struct request_queue *q)
	spin_lock_irqsave(q->queue_lock, flags);
	spin_unlock_irqrestore(q->queue_lock, flags);
static void dm_done(struct request *clone, int error, bool mapped)
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);

		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
		DMWARN("unimplemented target endio return value: %d", r);

 * Request completion handler for request-based dm
static void dm_softirq_done(struct request *rq)
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)

	dm_done(clone, tio->error, mapped);

 * Complete the clone and the original request with the error status
 * through softirq context.
static void dm_complete_request(struct request *clone, int error)
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	rq->completion_data = clone;
	blk_complete_request(rq);

 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
void dm_kill_unmapped_request(struct request *clone, int error)
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);

 * Called with the queue lock held
static void end_clone_request(struct request *clone, int error)
	 * For just cleaning up the information of the queue in which
	 * the clone was dispatched.
	 * The clone is *NOT* freed actually here because it is alloced from
	 * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
	__blk_put_request(clone->q, clone);

	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock. Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
	dm_complete_request(clone, error);
 * Return maximum size of I/O possible at the supplied sector up to the current
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	 * Does the target need to split even further?
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;
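/*
 * Worked example of the boundary math above: with a power-of-two
 * ti->max_io_len of 128 and a target-relative offset of 200 sectors,
 * offset & (128 - 1) = 72, so 128 - 72 = 56 sectors remain before the
 * next 128-sector boundary; non-power-of-two lengths take the slower
 * sector_div() path instead.
 */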
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";

	ti->max_io_len = (uint32_t) len;
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

static void __map_bio(struct dm_target *ti, struct dm_target_io *tio)
	struct mapped_device *md;
	struct bio *clone = &tio->clone;

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
				      tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		dec_pending(tio->io, r);
		DMWARN("unimplemented target map return value: %d", r);

	struct mapped_device *md;
	struct dm_table *map;
	sector_t sector_count;

 * Creates a little bio that just does part of a bvec.
static void split_bvec(struct dm_target_io *tio, struct bio *bio,
		       sector_t sector, unsigned short idx, unsigned int offset,
		       unsigned int len, struct bio_set *bs)
	struct bio *clone = &tio->clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
 * Creates a bio that consists of range of complete bvecs.
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
		      sector_t sector, unsigned short idx,
		      unsigned short bv_count, unsigned int len,
	struct bio *clone = &tio->clone;

	__bio_clone(clone, bio);
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);

static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti, int nr_iovecs)
	struct dm_target_io *tio;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs);
	tio = container_of(clone, struct dm_target_io, clone);

	memset(&tio->info, 0, sizeof(tio->info));
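/*
 * The dm_target_io is not allocated separately here: the md's bioset is
 * created with a front_pad of offsetof(struct dm_target_io, clone) (see
 * dm_alloc_md_mempools() at the end of this file), so the tio lives in
 * front of the bio and container_of() above simply recovers it.
 */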
static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
				   unsigned request_nr, sector_t len)
	struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs);
	struct bio *clone = &tio->clone;

	tio->info.target_request_nr = request_nr;

	 * Discard requests require the bio's inline iovecs be initialized.
	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
	 * and discard, so no need for concern about wasted bvec allocations.
	__bio_clone(clone, ci->bio);

		clone->bi_sector = ci->sector;
		clone->bi_size = to_bytes(len);

static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
				    unsigned num_requests, sector_t len)
	unsigned request_nr;

	for (request_nr = 0; request_nr < num_requests; request_nr++)
		__issue_target_request(ci, ti, request_nr, len);

static int __clone_and_map_empty_flush(struct clone_info *ci)
	unsigned target_nr = 0;
	struct dm_target *ti;

	BUG_ON(bio_has_data(ci->bio));
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		__issue_target_requests(ci, ti, ti->num_flush_requests, 0);

 * Perform all io with a single clone.
static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
	struct bio *bio = ci->bio;
	struct dm_target_io *tio;

	tio = alloc_tio(ci, ti, bio->bi_max_vecs);
	clone_bio(tio, bio, ci->sector, ci->idx, bio->bi_vcnt - ci->idx,
		  ci->sector_count, ci->md->bs);

	ci->sector_count = 0;

static int __clone_and_map_discard(struct clone_info *ci)
	struct dm_target *ti;

		ti = dm_table_find_target(ci->map, ci->sector);
		if (!dm_target_is_valid(ti))

		 * Even though the device advertised discard support,
		 * that does not mean every target supports it, and
		 * reconfiguration might also have changed that since the
		 * check was performed.
		if (!ti->num_discard_requests)

		if (!ti->split_discard_requests)
			len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
			len = min(ci->sector_count, max_io_len(ci->sector, ti));

		__issue_target_requests(ci, ti, ti->num_discard_requests, len);

	} while (ci->sector_count -= len);
static int __clone_and_map(struct clone_info *ci)
	struct bio *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	if (unlikely(bio->bi_rw & REQ_DISCARD))
		return __clone_and_map_discard(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))

	max = max_io_len(ci->sector, ti);

	if (ci->sector_count <= max) {
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		__clone_and_map_simple(ci, ti);

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		sector_t remaining = max;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)

			remaining -= bv_len;

		tio = alloc_tio(ci, ti, bio->bi_max_vecs);
		clone_bio(tio, bio, ci->sector, ci->idx, i - ci->idx, len,

		ci->sector_count -= len;

		 * Handle a bvec that must be split between two or more targets.
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

			ti = dm_table_find_target(ci->map, ci->sector);
			if (!dm_target_is_valid(ti))

			max = max_io_len(ci->sector, ti);

			len = min(remaining, max);

			tio = alloc_tio(ci, ti, 1);
			split_bvec(tio, bio, ci->sector, ci->idx,
				   bv->bv_offset + offset, len, ci->md->bs);

			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);
 * Split the bio into several clones and submit it to targets.
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
	struct clone_info ci;

	ci.map = dm_get_live_table(md);
	if (unlikely(!ci.map)) {

	ci.io = alloc_io(md);
	atomic_set(&ci.io->io_count, 1);
	spin_lock_init(&ci.io->endio_lock);
	ci.sector = bio->bi_sector;
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	if (bio->bi_rw & REQ_FLUSH) {
		ci.bio = &ci.md->flush_bio;
		ci.sector_count = 0;
		error = __clone_and_map_empty_flush(&ci);
		/* dec_pending submits any data associated with flush */
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error)
			error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);

/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/
static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *ti;
	sector_t max_sectors;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))

	 * Find maximum amount of I/O that won't need splitting
	max_sectors = min(max_io_len(bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;

	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);
	 * If the target doesn't support merge method and some of the devices
	 * provided their merge_bvec method (we know this by looking at
	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
	 * entries. So always set max_size to 0, and the code below allows
	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)

	 * Always allow an entire first page
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;
 * The request function that just remaps the bio built up by
static void _dm_request(struct request_queue *q, struct bio *bio)
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));

	/* if we're suspended, we have to queue this io for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		up_read(&md->io_lock);

		if (bio_rw(bio) != READA)

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);

static int dm_request_based(struct mapped_device *md)
	return blk_queue_stackable(md->queue);

static void dm_request(struct request_queue *q, struct bio *bio)
	struct mapped_device *md = q->queuedata;

	if (dm_request_based(md))
		blk_queue_bio(q, bio);
		_dm_request(q, bio);
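/*
 * dm_request() is the make_request function installed by
 * dm_init_md_queue(): request-based devices hand the bio straight to
 * the block layer via blk_queue_bio(), while bio-based devices go
 * through _dm_request() and __split_and_process_bio().
 */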
void dm_dispatch_request(struct request *rq)
	if (blk_queue_io_stat(rq->q))
		rq->cmd_flags |= REQ_IO_STAT;

	rq->start_time = jiffies;
	r = blk_insert_cloned_request(rq->q, rq);
		dm_complete_request(rq, r);
EXPORT_SYMBOL_GPL(dm_dispatch_request);

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	bio->bi_end_io = end_clone_bio;
	bio->bi_private = info;

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio)
	r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
			      dm_rq_bio_constructor, tio);

	clone->cmd = rq->cmd;
	clone->cmd_len = rq->cmd_len;
	clone->sense = rq->sense;
	clone->buffer = rq->buffer;
	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

static struct request *clone_rq(struct request *rq, struct mapped_device *md,
	struct request *clone;
	struct dm_rq_target_io *tio;

	tio = alloc_rq_tio(md, gfp_mask);

	memset(&tio->info, 0, sizeof(tio->info));

	clone = &tio->clone;
	if (setup_clone(clone, rq, tio)) {

 * Called with the queue lock held.
static int dm_prep_fn(struct request_queue *q, struct request *rq)
	struct mapped_device *md = q->queuedata;
	struct request *clone;

	if (unlikely(rq->special)) {
		DMWARN("Already has something in rq->special.");
		return BLKPREP_KILL;

	clone = clone_rq(rq, md, GFP_ATOMIC);
		return BLKPREP_DEFER;

	rq->special = clone;
	rq->cmd_flags |= REQ_DONTPREP;
 * 0  : the request has been processed (not requeued)
 * !0 : the request has been requeued
static int map_request(struct dm_target *ti, struct request *clone,
		       struct mapped_device *md)
	int r, requeued = 0;
	struct dm_rq_target_io *tio = clone->end_io_data;

	r = ti->type->map_rq(ti, clone, &tio->info);
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(tio->orig));
		dm_dispatch_request(clone);
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
		DMWARN("unimplemented target map return value: %d", r);
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(clone, r);

static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
	struct request *clone;

	blk_start_request(orig);
	clone = orig->special;
	atomic_inc(&md->pending[rq_data_dir(clone)]);

	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.

 * q->request_fn for request-based dm.
 * Called with the queue lock held.
static void dm_request_fn(struct request_queue *q)
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *ti;
	struct request *rq, *clone;

	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);

		/* always use block 0 to find the target for flushes for now */
		if (!(rq->cmd_flags & REQ_FLUSH))
			pos = blk_rq_pos(rq);

		ti = dm_table_find_target(map, pos);
		if (!dm_target_is_valid(ti)) {
			 * Must perform setup, that dm_done() requires,
			 * before calling dm_kill_unmapped_request
			DMERR_LIMIT("request attempted access beyond the end of device");
			clone = dm_start_request(md, rq);
			dm_kill_unmapped_request(clone, -EIO);

		if (ti->type->busy && ti->type->busy(ti))

		clone = dm_start_request(md, rq);

		spin_unlock(q->queue_lock);
		if (map_request(ti, clone, md))

		BUG_ON(!irqs_disabled());
		spin_lock(q->queue_lock);

	BUG_ON(!irqs_disabled());
	spin_lock(q->queue_lock);

	blk_delay_queue(q, HZ / 10);
int dm_underlying_device_busy(struct request_queue *q)
	return blk_lld_busy(q);
EXPORT_SYMBOL_GPL(dm_underlying_device_busy);

static int dm_lld_busy(struct request_queue *q)
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);

	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
		r = dm_table_any_busy_target(map);

static int dm_any_congested(void *congested_data, int bdi_bits)
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_live_table(md);
			 * Request-based dm cares about only own queue for
			 * the query about congestion status of request_queue
			if (dm_request_based(md))
				r = md->queue->backing_dev_info.state &
				r = dm_table_any_congested(map, bdi_bits);

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);

 * See if the device with a specific minor # is free.
static int specific_minor(int minor)
	if (minor >= (1 << MINORBITS))

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);

		idr_remove(&_minor_idr, m);

	spin_unlock(&_minor_lock);

static int next_free_minor(int *minor)
	r = idr_pre_get(&_minor_idr, GFP_KERNEL);

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);

	spin_unlock(&_minor_lock);
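/*
 * Both minor allocators use the older two-step IDR API:
 * idr_pre_get() preallocates with GFP_KERNEL outside _minor_lock so
 * that idr_get_new()/idr_get_new_above() under the spinlock do not
 * need to allocate memory themselves.
 */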
static const struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);

static void dm_init_md_queue(struct mapped_device *md)
	 * Request-based dm devices cannot be stacked on top of bio-based dm
	 * devices. The type of this dm device has not been decided yet.
	 * The type is decided at the first table loading time.
	 * To prevent problematic device stacking, clear the queue flag
	 * for request stacking support until then.
	 * This queue is new, so no concurrency on the queue_flags.
	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

 * Allocate and initialise a blank device with a given minor.
static struct mapped_device *alloc_dev(int minor)
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
		DMWARN("unable to allocate device, out of memory.");

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
		r = specific_minor(minor);

	md->type = DM_TYPE_NONE;
	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	spin_lock_init(&md->deferred_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);

	dm_init_md_queue(md);

	md->disk = alloc_disk(1);

	atomic_set(&md->pending[0], 0);
	atomic_set(&md->pending[1], 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);

	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush",
				 WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);

	md->bdev = bdget_disk(md->disk, 0);

	bio_init(&md->flush_bio);
	md->flush_bio.bi_bdev = md->bdev;
	md->flush_bio.bi_rw = WRITE_FLUSH;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	destroy_workqueue(md->wq);
	del_gendisk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
	int minor = MINOR(disk_devt(md->disk));

	destroy_workqueue(md->wq);
		mempool_destroy(md->tio_pool);
		mempool_destroy(md->io_pool);
		bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);

static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
	struct dm_md_mempools *p;

	if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs)
		/* the md already has necessary mempools */

	p = dm_table_get_md_mempools(t);
	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);

	md->io_pool = p->io_pool;
	md->tio_pool = p->tio_pool;

	/* mempool bind completed, now no need any mempools in the table */
	dm_table_free_md_mempools(t);

 * Bind a table to the device.
static void event_callback(void *context)
	unsigned long flags;
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);

 * Protected by md->suspend_lock obtained by dm_swap_table().
static void __set_size(struct mapped_device *md, sector_t size)
	set_capacity(md->disk, size);

	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
 * Return 1 if the queue has a compulsory merge_bvec_fn function.
 * If this function returns 0, then the device is either a non-dm
 * device without a merge_bvec_fn, or it is a dm device that is
 * able to split any bios it receives that are too big.
int dm_queue_merge_is_compulsory(struct request_queue *q)
	struct mapped_device *dev_md;

	if (!q->merge_bvec_fn)

	if (q->make_request_fn == dm_request) {
		dev_md = q->queuedata;
		if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))

static int dm_device_merge_is_compulsory(struct dm_target *ti,
					 struct dm_dev *dev, sector_t start,
					 sector_t len, void *data)
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	return dm_queue_merge_is_compulsory(q);

 * Return 1 if it is acceptable to ignore merge_bvec_fn based
 * on the properties of the underlying devices.
static int dm_table_merge_is_optional(struct dm_table *table)
	struct dm_target *ti;

	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))

 * Returns old map, which caller must destroy.
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	unsigned long flags;
	int merge_is_optional;

	size = dm_table_get_size(t);

	 * Wipe any geometry if the size of the table changed.
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension. So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	if (dm_table_request_based(t) && !blk_queue_stopped(q))

	__bind_mempools(md, t);

	merge_is_optional = dm_table_merge_is_optional(t);

	write_lock_irqsave(&md->map_lock, flags);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);
	dm_table_set_restrictions(t, q, limits);
	if (merge_is_optional)
		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
	write_unlock_irqrestore(&md->map_lock, flags);

 * Returns unbound table for the caller to free.
static struct dm_table *__unbind(struct mapped_device *md)
	struct dm_table *map = md->map;
	unsigned long flags;

	dm_table_event_callback(map, NULL, NULL);
	write_lock_irqsave(&md->map_lock, flags);
	write_unlock_irqrestore(&md->map_lock, flags);
 * Constructor for a new device.
int dm_create(int minor, struct mapped_device **result)
	struct mapped_device *md;

	md = alloc_dev(minor);

 * Functions to manage md->type.
 * All are required to hold md->type_lock.
void dm_lock_md_type(struct mapped_device *md)
	mutex_lock(&md->type_lock);

void dm_unlock_md_type(struct mapped_device *md)
	mutex_unlock(&md->type_lock);

void dm_set_md_type(struct mapped_device *md, unsigned type)

unsigned dm_get_md_type(struct mapped_device *md)

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
	return md->immutable_target_type;

 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
static int dm_init_request_based_queue(struct mapped_device *md)
	struct request_queue *q = NULL;

	if (md->queue->elevator)

	/* Fully initialize the queue */
	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);

	dm_init_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);
	blk_queue_lld_busy(md->queue, dm_lld_busy);

	elv_register_queue(md->queue);

 * Setup the DM device's queue based on md's type
int dm_setup_md_queue(struct mapped_device *md)
	if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
	    !dm_init_request_based_queue(md)) {
		DMWARN("Cannot initialize queue for request-based mapped device");

static struct mapped_device *dm_find_md(dev_t dev)
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   dm_deleting_md(md) ||
		   test_bit(DMF_FREEING, &md->flags))) {

	spin_unlock(&_minor_lock);

struct mapped_device *dm_get_md(dev_t dev)
	struct mapped_device *md = dm_find_md(dev);
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
	return md->interface_ptr;

void dm_set_mdptr(struct mapped_device *md, void *ptr)
	md->interface_ptr = ptr;

void dm_get(struct mapped_device *md)
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));

const char *dm_device_name(struct mapped_device *md)
EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
	struct dm_table *map;

	spin_lock(&_minor_lock);
	map = dm_get_live_table(md);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);

	 * Rare, but there may be I/O requests still going to complete,
	 * for example. Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device,
	 * after the mapped_device state becomes DMF_FREEING.
		while (atomic_read(&md->holders))
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_table_destroy(__unbind(md));

void dm_destroy(struct mapped_device *md)
	__dm_destroy(md, true);

void dm_destroy_immediate(struct mapped_device *md)
	__dm_destroy(md, false);

void dm_put(struct mapped_device *md)
	atomic_dec(&md->holders);
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&md->wait, &wait);

		set_current_state(interruptible);

		if (!md_in_flight(md))

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {

	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);
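/*
 * The wait above is open-coded (add_wait_queue/remove_wait_queue on
 * md->wait) rather than using wait_event() so that the caller can
 * choose TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE via the
 * 'interruptible' argument.
 */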
 * Process the deferred bios
static void dm_wq_work(struct work_struct *work)
	struct mapped_device *md = container_of(work, struct mapped_device,

	down_read(&md->io_lock);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		up_read(&md->io_lock);

		if (dm_request_based(md))
			generic_make_request(c);
			__split_and_process_bio(md, c);

		down_read(&md->io_lock);

	up_read(&md->io_lock);

static void dm_queue_flush(struct mapped_device *md)
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_clear_bit();
	queue_work(md->wq, &md->work);

 * Swap in a new table, returning the old one for the caller to destroy.
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
	struct dm_table *live_map, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))

	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table(md);
			limits = md->queue->limits;
		dm_table_put(live_map);

	r = dm_calculate_queue_limits(table, &limits);

	map = __bind(md, table, &limits);

	mutex_unlock(&md->suspend_lock);

 * Functions to lock and unlock any filesystem running on the
static int lock_fs(struct mapped_device *md)
	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;

	set_bit(DMF_FROZEN, &md->flags);

static void unlock_fs(struct mapped_device *md)
	if (!test_bit(DMF_FROZEN, &md->flags))

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
 * We need to be able to change a mapping table under a mounted
 * filesystem. For example we might want to move some data in
 * the background. Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.

 * Suspend mechanism in request-based dm.
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 * To abort suspend, start the request_queue.
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
	struct dm_table *map = NULL;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended_md(md)) {

	map = dm_get_live_table(md);

	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	if (!noflush && do_lockfs) {

	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	up_write(&md->io_lock);

	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	if (dm_request_based(md))
		stop_queue(md->queue);

	flush_workqueue(md->wq);

	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted ? */
		if (dm_request_based(md))
			start_queue(md->queue);

		goto out; /* pushback list is already flushed, so skip flush */

	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now. There is no request-processing activity. All new
	 * requests are being added to md->deferred list.
	set_bit(DMF_SUSPENDED, &md->flags);

	dm_table_postsuspend_targets(map);

	mutex_unlock(&md->suspend_lock);

int dm_resume(struct mapped_device *md)
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended_md(md))

	map = dm_get_live_table(md);
	if (!map || !dm_table_get_size(map))

	r = dm_table_resume_targets(map);

	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	if (dm_request_based(md))
		start_queue(md->queue);

	clear_bit(DMF_SUSPENDED, &md->flags);

	mutex_unlock(&md->suspend_lock);
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);

		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,

uint32_t dm_next_uevent_seq(struct mapped_device *md)
	return atomic_add_return(1, &md->uevent_seq);

uint32_t dm_get_event_nr(struct mapped_device *md)
	return atomic_read(&md->event_nr);

int dm_wait_event(struct mapped_device *md, int event_nr)
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

 * The gendisk is only valid as long as you have a reference
struct gendisk *dm_disk(struct mapped_device *md)

struct kobject *dm_kobject(struct mapped_device *md)

 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of md structure
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)

	if (test_bit(DMF_FREEING, &md->flags) ||

int dm_suspended_md(struct mapped_device *md)
	return test_bit(DMF_SUSPENDED, &md->flags);

int dm_suspended(struct dm_target *ti)
	return dm_suspended_md(dm_table_get_md(ti->table));
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
	return __noflush_suspending(dm_table_get_md(ti->table));
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity)
	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
	unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;

	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
	if (!pools->io_pool)
		goto free_pools_and_out;

	pools->tio_pool = NULL;
	if (type == DM_TYPE_REQUEST_BASED) {
		pools->tio_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
		if (!pools->tio_pool)
			goto free_io_pool_and_out;

	pools->bs = (type == DM_TYPE_BIO_BASED) ?
		bioset_create(pool_size,
			      offsetof(struct dm_target_io, clone)) :
		bioset_create(pool_size,
			      offsetof(struct dm_rq_clone_bio_info, clone));
		goto free_tio_pool_and_out;

	if (integrity && bioset_integrity_create(pools->bs, pool_size))
		goto free_bioset_and_out;

free_bioset_and_out:
	bioset_free(pools->bs);

free_tio_pool_and_out:
	if (pools->tio_pool)
		mempool_destroy(pools->tio_pool);

free_io_pool_and_out:
	mempool_destroy(pools->io_pool);

void dm_free_md_mempools(struct dm_md_mempools *pools)
		mempool_destroy(pools->io_pool);

	if (pools->tio_pool)
		mempool_destroy(pools->tio_pool);

		bioset_free(pools->bs);

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE

EXPORT_SYMBOL(dm_get_mapinfo);

module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");