/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	struct bio *bio;
	atomic_t io_count;
};

/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};
/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FS_LOCKED 2
struct mapped_device {
	struct rw_semaphore lock;
	rwlock_t map_lock;
	atomic_t holders;

	unsigned long flags;

	request_queue_t *queue;
	struct gendisk *disk;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	atomic_t event_nr;
	wait_queue_head_t eventq;

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
};
#define MIN_IOS 256

static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;
static int __init local_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_io_cache = kmem_cache_create("dm_io",
				      sizeof(struct dm_io), 0, 0, NULL, NULL);
	if (!_io_cache)
		return -ENOMEM;

	/* allocate a slab for the target ios */
	_tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io),
				       0, 0, NULL, NULL);
	if (!_tio_cache) {
		kmem_cache_destroy(_io_cache);
		return -ENOMEM;
	}

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0) {
		kmem_cache_destroy(_tio_cache);
		kmem_cache_destroy(_io_cache);
		return r;
	}

	if (!_major)
		_major = r;

	return 0;
}
static void local_exit(void)
{
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);

	if (unregister_blkdev(_major, _name) < 0)
		DMERR("devfs_unregister_blkdev failed");

	_major = 0;

	DMINFO("cleaned up");
}
/*
 * We have a lot of init/exit functions, so it seems easier to
 * store them in an array.  The disposable macro 'xx'
 * expands a prefix into a pair of function names.
 */
static struct {
	int (*init) (void);
	void (*exit) (void);
} _inits[] = {
#define xx(n) {n ## _init, n ## _exit},
	xx(local)
	xx(dm_target)
	xx(dm_linear)
	xx(dm_stripe)
	xx(dm_interface)
#undef xx
};
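/*
 * For example, xx(local) expands to {local_init, local_exit}, pairing the
 * local_init()/local_exit() functions defined above into one array entry.
 */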
static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i].init();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_inits[i].exit();

	return r;
}
static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_inits);

	while (i--)
		_inits[i].exit();
}
/*-----------------------------------------------------------------
 * Block device functions
 *---------------------------------------------------------------*/
static int dm_blk_open(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_get(md);
	return 0;
}
static int dm_blk_close(struct inode *inode, struct file *file)
{
	struct mapped_device *md;

	md = inode->i_bdev->bd_disk->private_data;
	dm_put(md);
	return 0;
}
static inline struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static inline void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static inline struct target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static inline void free_tio(struct mapped_device *md, struct target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}
/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->lock);
	return 0;		/* deferred successfully */
}
/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
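/*
 * Illustrative sketch (not part of the driver): a caller that follows the
 * dm_get_table()/dm_table_put() contract described above.  The helper name
 * example_table_user() is hypothetical.
 */
#if 0
static sector_t example_table_user(struct mapped_device *md)
{
	struct dm_table *map = dm_get_table(md);	/* takes a reference */
	sector_t size = 0;

	if (map) {
		size = dm_table_get_size(map);
		dm_table_put(map);			/* always drop it */
	}

	return size;
}
#endif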
/*-----------------------------------------------------------------
 * A more elegant solution is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static inline void dec_pending(struct dm_io *io, int error)
{
	if (error)
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (atomic_dec_and_test(&io->md->pending))
			/* nudge anyone waiting on suspend queue */
			wake_up(&io->md->wait);

		bio_endio(io->bio, io->bio->bi_size, io->error);
		free_io(io->md, io);
	}
}
static int clone_endio(struct bio *bio, unsigned int done, int error)
{
	int r = 0;
	struct target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (bio->bi_size)
		return 1;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0)
			error = r;

		else if (r > 0)
			/* the target wants another shot at the io */
			return 1;
	}

	free_tio(io->md, tio);
	dec_pending(io, error);
	bio_put(bio);
	return r;
}
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = dm_round_up(offset + 1, ti->split_io) - offset;

		if (len > boundary)
			len = boundary;
	}

	return len;
}
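/*
 * Worked example (illustrative numbers only): with ti->begin = 0,
 * ti->len = 100 and ti->split_io = 8, an io starting at sector 5 gives
 * offset = 5, len = 95 and boundary = dm_round_up(6, 8) - 5 = 3, so at
 * most 3 sectors may be issued before the next 8-sector boundary.
 */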
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct target_io *tio)
{
	int r;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	r = ti->type->map(ti, clone, &tio->info);
	if (r > 0)
		/* the bio has been remapped so dispatch it */
		generic_make_request(clone);

	else if (r < 0) {
		/* error the io and bail out */
		struct dm_io *io = tio->io;
		free_tio(tio->io->md, tio);
		dec_pending(io, -EIO);
		bio_put(clone);
	}
}
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};
/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc(GFP_NOIO, 1);
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;

	return clone;
}
/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len)
{
	struct bio *clone;

	clone = bio_clone(bio, GFP_NOIO);
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}
static void __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
	sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
	struct target_io *tio;

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Create two copy bios to deal with io that has
		 * been split across a target.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;

		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset, max);
		__map_bio(ti, clone, tio);

		ci->sector += max;
		ci->sector_count -= max;
		ti = dm_table_find_target(ci->map, ci->sector);

		len = to_sector(bv->bv_len) - max;
		clone = split_bvec(bio, ci->sector, ci->idx,
				   bv->bv_offset + to_bytes(max), len);
		tio = alloc_tio(ci->md);
		tio->io = ci->io;
		tio->ti = ti;
		memset(&tio->info, 0, sizeof(tio->info));
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx++;
	}
}
/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;

	ci.map = dm_get_table(md);
	if (!ci.map) {
		bio_io_error(bio, bio->bi_size);
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	atomic_inc(&md->pending);
	while (ci.sector_count)
		__clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, 0);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
	int r;
	struct mapped_device *md = q->queuedata;

	down_read(&md->lock);

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->lock);

		if (bio_rw(bio) == READA) {
			bio_io_error(bio, bio->bi_size);
			return 0;
		}

		r = queue_io(md, bio);
		if (r < 0) {
			bio_io_error(bio, bio->bi_size);
			return 0;

		} else if (r == 0)
			return 0;	/* deferred successfully */

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->lock);
	}

	__split_bio(md, bio);
	up_read(&md->lock);
	return 0;
}
static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
			sector_t *error_sector)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	int ret = -ENXIO;

	if (map) {
		ret = dm_table_flush_all(md->map);
		dm_table_put(map);
	}

	return ret;
}
static void dm_unplug_all(request_queue_t *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r;
	struct mapped_device *md = (struct mapped_device *) congested_data;
	struct dm_table *map = dm_get_table(md);

	if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
		r = bdi_bits;
	else
		r = dm_table_any_congested(map, bdi_bits);

	dm_table_put(map);
	return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DECLARE_MUTEX(_minor_lock);
static DEFINE_IDR(_minor_idr);

static void free_minor(unsigned int minor)
{
	down(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	up(&_minor_lock);
}
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(unsigned int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	down(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, specific_minor, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
	}

 out:
	up(&_minor_lock);
	return r;
}
static int next_free_minor(unsigned int *minor)
{
	int r, m;

	down(&_minor_lock);

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r) {
		r = -ENOMEM;
		goto out;
	}

	r = idr_get_new(&_minor_idr, next_free_minor, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

 out:
	up(&_minor_lock);
	return r;
}
static struct block_device_operations dm_blk_dops;
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(unsigned int minor, int persistent)
{
	int r;
	struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	/* get a minor number for the dev */
	r = persistent ? specific_minor(minor) : next_free_minor(&minor);
	if (r < 0)
		goto bad1;

	memset(md, 0, sizeof(*md));
	init_rwsem(&md->lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->event_nr, 0);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad1;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	md->queue->unplug_fn = dm_unplug_all;
	md->queue->issue_flush_fn = dm_flush_all;

	md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				     mempool_free_slab, _io_cache);
	if (!md->io_pool)
		goto bad2;

	md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab,
				      mempool_free_slab, _tio_cache);
	if (!md->tio_pool)
		goto bad3;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad4;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	return md;

 bad4:
	mempool_destroy(md->tio_pool);
 bad3:
	mempool_destroy(md->io_pool);
 bad2:
	blk_put_queue(md->queue);
	free_minor(minor);
 bad1:
	kfree(md);
	return NULL;
}
static void free_dev(struct mapped_device *md)
{
	free_minor(md->disk->first_minor);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	del_gendisk(md->disk);
	put_disk(md->disk);
	blk_put_queue(md->queue);
	kfree(md);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	struct mapped_device *md = (struct mapped_device *) context;

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}
static void __set_size(struct gendisk *disk, sector_t size)
{
	struct block_device *bdev;

	set_capacity(disk, size);
	bdev = bdget_disk(disk, 0);
	if (bdev) {
		down(&bdev->bd_inode->i_sem);
		i_size_write(bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
		up(&bdev->bd_inode->i_sem);
		bdput(bdev);
	}
}
static int __bind(struct mapped_device *md, struct dm_table *t)
{
	request_queue_t *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);
	__set_size(md->disk, size);

	write_lock(&md->map_lock);
	md->map = t;
	write_unlock(&md->map_lock);

	dm_table_get(t);
	dm_table_event_callback(md->map, event_callback, md);
	dm_table_set_restrictions(t, q);

	return 0;
}
static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_put(map);
}
/*
 * Constructor for a new device.
 */
static int create_aux(unsigned int minor, int persistent,
		      struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor, persistent);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}

int dm_create(struct mapped_device **result)
{
	return create_aux(0, 0, result);
}

int dm_create_with_minor(unsigned int minor, struct mapped_device **result)
{
	return create_aux(minor, 1, result);
}
void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

void dm_put(struct mapped_device *md)
{
	struct dm_table *map = dm_get_table(md);

	if (atomic_dec_and_test(&md->holders)) {
		if (!test_bit(DMF_SUSPENDED, &md->flags) && map)
			dm_table_suspend_targets(map);
		__unbind(md);
		free_dev(md);
	}

	dm_table_put(map);
}
/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
	struct bio *n;

	while (c) {
		n = c->bi_next;
		c->bi_next = NULL;
		__split_bio(md, c);
		c = n;
	}
}
/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r;

	down_write(&md->lock);

	/* device must be suspended */
	if (!test_bit(DMF_SUSPENDED, &md->flags)) {
		up_write(&md->lock);
		return -EPERM;
	}

	__unbind(md);
	r = __bind(md, table);

	up_write(&md->lock);
	return r;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int __lock_fs(struct mapped_device *md)
{
	struct block_device *bdev;

	if (test_and_set_bit(DMF_FS_LOCKED, &md->flags))
		return 0;

	bdev = bdget_disk(md->disk, 0);
	if (!bdev) {
		DMWARN("bdget failed in __lock_fs");
		return -ENOMEM;
	}

	WARN_ON(md->frozen_sb);
	md->frozen_sb = freeze_bdev(bdev);
	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.  We'll bdput
	 * in __unlock_fs.
	 */
	return 0;
}
static int __unlock_fs(struct mapped_device *md)
{
	struct block_device *bdev;

	if (!test_and_clear_bit(DMF_FS_LOCKED, &md->flags))
		return 0;

	bdev = bdget_disk(md->disk, 0);
	if (!bdev) {
		DMWARN("bdget failed in __unlock_fs");
		return -ENOMEM;
	}

	thaw_bdev(bdev, md->frozen_sb);
	md->frozen_sb = NULL;

	bdput(bdev);	/* reference taken by the bdget_disk() above */
	bdput(bdev);	/* reference held since __lock_fs() */

	return 0;
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md)
{
	struct dm_table *map;
	DECLARE_WAITQUEUE(wait, current);

	/* Flush I/O to the device. */
	down_read(&md->lock);
	if (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->lock);
		return -EINVAL;
	}

	__lock_fs(md);
	up_read(&md->lock);

	/*
	 * First we set the BLOCK_IO flag so no more ios will be
	 * mapped.
	 */
	down_write(&md->lock);
	if (test_bit(DMF_BLOCK_IO, &md->flags)) {
		/*
		 * If we get here we know another thread is
		 * trying to suspend as well, so we leave the fs
		 * locked for this thread.
		 */
		up_write(&md->lock);
		return -EINVAL;
	}

	set_bit(DMF_BLOCK_IO, &md->flags);
	add_wait_queue(&md->wait, &wait);
	up_write(&md->lock);

	/* unplug */
	map = dm_get_table(md);
	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}

	/*
	 * Then we wait for the already mapped ios to
	 * complete.
	 */
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (!atomic_read(&md->pending) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	down_write(&md->lock);
	remove_wait_queue(&md->wait, &wait);

	/* were we interrupted ? */
	if (atomic_read(&md->pending)) {
		__unlock_fs(md);
		clear_bit(DMF_BLOCK_IO, &md->flags);
		up_write(&md->lock);
		return -EINTR;
	}

	set_bit(DMF_SUSPENDED, &md->flags);

	map = dm_get_table(md);
	if (map)
		dm_table_suspend_targets(map);
	dm_table_put(map);
	up_write(&md->lock);

	return 0;
}
int dm_resume(struct mapped_device *md)
{
	struct bio *def;
	struct dm_table *map = dm_get_table(md);

	down_write(&md->lock);
	if (!map ||
	    !test_bit(DMF_SUSPENDED, &md->flags) ||
	    !dm_table_get_size(map)) {
		up_write(&md->lock);
		dm_table_put(map);
		return -EINVAL;
	}

	dm_table_resume_targets(map);
	clear_bit(DMF_SUSPENDED, &md->flags);
	clear_bit(DMF_BLOCK_IO, &md->flags);

	def = bio_list_get(&md->deferred);
	__flush_deferred_io(md, def);
	up_write(&md->lock);
	__unlock_fs(md);
	dm_table_unplug_all(map);
	dm_table_put(map);

	return 0;
}
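/*
 * Illustrative sketch (not part of the driver): the caller-side sequence
 * the comment above dm_suspend() describes.  Error handling is minimal and
 * the name example_replace_table() is hypothetical.
 */
#if 0
static int example_replace_table(struct mapped_device *md,
				 struct dm_table *new_table)
{
	int r;

	r = dm_suspend(md);		/* flush in-flight io, defer new io */
	if (r)
		return r;

	r = dm_swap_table(md, new_table);	/* bind the new mapping */
	if (r)
		return r;

	return dm_resume(md);		/* replay deferred io on the new table */
}
#endif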
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
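/*
 * Illustrative sketch (not part of the driver): how a caller might block
 * until the device generates a new event.  The function name
 * example_wait_for_next_event() is hypothetical.
 */
#if 0
static int example_wait_for_next_event(struct mapped_device *md)
{
	uint32_t seen = dm_get_event_nr(md);

	/* returns 0 once event_nr differs from 'seen', or on a signal */
	return dm_wait_event(md, seen);
}
#endif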
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}
static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.owner = THIS_MODULE
};

module_init(dm_init);
module_exit(dm_exit);
module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <thornber@sistina.com>");
MODULE_LICENSE("GPL");