/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * One of these is allocated per bio.
 */
struct dm_io {
        struct mapped_device *md;
        int error;
        struct bio *bio;
        atomic_t io_count;
        unsigned long start_time;
};
/*
 * One of these is allocated per target within a bio.  Hopefully
 * this will be simplified out one day.
 */
struct target_io {
        struct dm_io *io;
        struct dm_target *ti;
        union map_info info;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
        if (bio && bio->bi_private)
                return &((struct target_io *)bio->bi_private)->info;

        return NULL;
}
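/*
 * Illustrative sketch (not part of the original file): a target type
 * can stash per-io context in this same union map_info from its map
 * function and recover it later through dm_get_mapinfo().  The target
 * function and state below are hypothetical:
 *
 *      static int example_map(struct dm_target *ti, struct bio *bio,
 *                             union map_info *info)
 *      {
 *              info->ptr = example_state;      // hypothetical per-io state
 *              bio->bi_bdev = example_bdev;    // remap the clone ...
 *              return 1;                       // ... and let dm dispatch it
 *      }
 */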
#define MINOR_ALLOCED ((void *)-1)
/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
struct mapped_device {
        struct rw_semaphore io_lock;
        struct semaphore suspend_lock;
        rwlock_t map_lock;
        atomic_t holders;

        unsigned long flags;

        request_queue_t *queue;
        struct gendisk *disk;
        char name[16];

        void *interface_ptr;

        /*
         * A list of ios that arrived while we were suspended.
         */
        atomic_t pending;
        wait_queue_head_t wait;
        struct bio_list deferred;

        /*
         * The current mapping.
         */
        struct dm_table *map;

        /*
         * io objects are allocated from here.
         */
        mempool_t *io_pool;
        mempool_t *tio_pool;

        /*
         * Event handling.
         */
        atomic_t event_nr;
        wait_queue_head_t eventq;

        /*
         * freeze/thaw support requires holding onto a super block
         */
        struct super_block *frozen_sb;
        struct block_device *suspended_bdev;

        /* forced geometry settings */
        struct hd_geometry geometry;
};
#define MIN_IOS 256
static kmem_cache_t *_io_cache;
static kmem_cache_t *_tio_cache;

static struct bio_set *dm_set;
static int __init local_init(void)
{
        int r;

        /* allocate a bio set */
        dm_set = bioset_create(16, 16, 4);
        if (!dm_set)
                return -ENOMEM;

        /* allocate a slab for the dm_ios */
        _io_cache = kmem_cache_create("dm_io",
                                      sizeof(struct dm_io), 0, 0, NULL, NULL);
        if (!_io_cache)
                return -ENOMEM;

        /* allocate a slab for the target ios */
        _tio_cache = kmem_cache_create("dm_tio", sizeof(struct target_io),
                                       0, 0, NULL, NULL);
        if (!_tio_cache) {
                kmem_cache_destroy(_io_cache);
                return -ENOMEM;
        }

        _major = major;
        r = register_blkdev(_major, _name);
        if (r < 0) {
                kmem_cache_destroy(_tio_cache);
                kmem_cache_destroy(_io_cache);
                return r;
        }

        if (!_major)
                _major = r;

        return 0;
}
static void local_exit(void)
{
        kmem_cache_destroy(_tio_cache);
        kmem_cache_destroy(_io_cache);

        if (unregister_blkdev(_major, _name) < 0)
                DMERR("devfs_unregister_blkdev failed");

        _major = 0;

        DMINFO("cleaned up");
}
int (*_inits[])(void) __initdata = {
        local_init,
        dm_target_init,
        dm_linear_init,
        dm_stripe_init,
        dm_interface_init,
};

void (*_exits[])(void) = {
        local_exit,
        dm_target_exit,
        dm_linear_exit,
        dm_stripe_exit,
        dm_interface_exit,
};
static int __init dm_init(void)
{
        const int count = ARRAY_SIZE(_inits);
        int r, i;

        for (i = 0; i < count; i++) {
                r = _inits[i]();
                if (r)
                        goto bad;
        }

        return 0;

      bad:
        while (i--)
                _exits[i]();

        return r;
}
static void __exit dm_exit(void)
{
        int i = ARRAY_SIZE(_exits);

        while (i--)
                _exits[i]();
}
/*
 * Block device functions
 */
static int dm_blk_open(struct inode *inode, struct file *file)
{
        struct mapped_device *md;

        spin_lock(&_minor_lock);

        md = inode->i_bdev->bd_disk->private_data;
        if (!md)
                goto out;

        if (test_bit(DMF_FREEING, &md->flags)) {
                md = NULL;
                goto out;
        }

        dm_get(md);

out:
        spin_unlock(&_minor_lock);

        return md ? 0 : -ENXIO;
}
static int dm_blk_close(struct inode *inode, struct file *file)
{
        struct mapped_device *md;

        md = inode->i_bdev->bd_disk->private_data;
        dm_put(md);
        return 0;
}
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mapped_device *md = bdev->bd_disk->private_data;

        return dm_get_geometry(md, geo);
}
static inline struct dm_io *alloc_io(struct mapped_device *md)
{
        return mempool_alloc(md->io_pool, GFP_NOIO);
}

static inline void free_io(struct mapped_device *md, struct dm_io *io)
{
        mempool_free(io, md->io_pool);
}

static inline struct target_io *alloc_tio(struct mapped_device *md)
{
        return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static inline void free_tio(struct mapped_device *md, struct target_io *tio)
{
        mempool_free(tio, md->tio_pool);
}
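/*
 * Note on the allocators above: io and tio objects come from mempools
 * and are allocated with GFP_NOIO, so an allocation on the i/o path may
 * block but never recurses into the block layer via reclaim, and the
 * pool's reserved elements let it wait for in-flight io to complete
 * rather than fail.
 */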
static void start_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;

        io->start_time = jiffies;

        preempt_disable();
        disk_round_stats(dm_disk(md));
        preempt_enable();
        dm_disk(md)->in_flight = atomic_inc_return(&md->pending);
}
static int end_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        struct bio *bio = io->bio;
        unsigned long duration = jiffies - io->start_time;
        int pending;
        int rw = bio_data_dir(bio);

        preempt_disable();
        disk_round_stats(dm_disk(md));
        preempt_enable();
        dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending);

        disk_stat_add(dm_disk(md), ticks[rw], duration);

        return !pending;
}
/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
        down_write(&md->io_lock);

        if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
                up_write(&md->io_lock);
                return 1;
        }

        bio_list_add(&md->deferred, bio);

        up_write(&md->io_lock);
        return 0;               /* deferred successfully */
}
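/*
 * The return convention for queue_io() above: 0 means the bio was
 * deferred, 1 means the device turned out not to be blocked after all,
 * in which case the caller must retake its lock and map the bio itself.
 * dm_request() below relies on this.
 */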
/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
        struct dm_table *t;

        read_lock(&md->map_lock);
        t = md->map;
        if (t)
                dm_table_get(t);
        read_unlock(&md->map_lock);

        return t;
}
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        *geo = md->geometry;

        return 0;
}
/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

        if (geo->start > sz) {
                DMWARN("Start sector is beyond the geometry limits.");
                return -EINVAL;
        }

        md->geometry = *geo;

        return 0;
}
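/*
 * For illustration, with a (hypothetical) geometry of 1024 cylinders,
 * 255 heads and 63 sectors, sz above works out to
 * 1024 * 255 * 63 = 16450560 sectors, so any geo->start at or below
 * that capacity is accepted.
 */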
/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant solution is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
        if (error)
                io->error = error;

        if (atomic_dec_and_test(&io->io_count)) {
                if (end_io_acct(io))
                        /* nudge anyone waiting on suspend queue */
                        wake_up(&io->md->wait);

                blk_add_trace_bio(io->md->queue, io->bio, BLK_TA_COMPLETE);

                bio_endio(io->bio, io->bio->bi_size, io->error);
                free_io(io->md, io);
        }
}
static int clone_endio(struct bio *bio, unsigned int done, int error)
{
        int r = 0;
        struct target_io *tio = bio->bi_private;
        struct dm_io *io = tio->io;
        dm_endio_fn endio = tio->ti->type->end_io;

        if (bio->bi_size)
                return 1;

        if (!bio_flagged(bio, BIO_UPTODATE) && !error)
                error = -EIO;

        if (endio) {
                r = endio(tio->ti, bio, error, &tio->info);
                if (r < 0)
                        error = r;
                else if (r > 0)
                        /* the target wants another shot at the io */
                        return 1;
        }

        free_tio(io->md, tio);
        dec_pending(io, error);
        bio_put(bio);
        return r;
}
static sector_t max_io_len(struct mapped_device *md,
                           sector_t sector, struct dm_target *ti)
{
        sector_t offset = sector - ti->begin;
        sector_t len = ti->len - offset;

        /*
         * Does the target need to split even further ?
         */
        if (ti->split_io) {
                sector_t boundary;
                boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
                           - offset;
                if (len > boundary)
                        len = boundary;
        }

        return len;
}
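/*
 * For illustration, take a hypothetical target with ti->split_io = 8
 * (split_io is assumed to be a power of two here) and an io starting at
 * offset 5 into the target: boundary = ((5 + 8) & ~7) - 5 = 3, so at
 * most 3 sectors may be issued before the next split_io boundary,
 * whatever the remaining length of the target.
 */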
static void __map_bio(struct dm_target *ti, struct bio *clone,
                      struct target_io *tio)
{
        int r;
        sector_t sector;

        /*
         * Sanity checks.
         */
        BUG_ON(!clone->bi_size);

        clone->bi_end_io = clone_endio;
        clone->bi_private = tio;

        /*
         * Map the clone.  If r == 0 we don't need to do
         * anything, the target has assumed ownership of
         * this io.
         */
        atomic_inc(&tio->io->io_count);
        sector = clone->bi_sector;
        r = ti->type->map(ti, clone, &tio->info);
        if (r > 0) {
                /* the bio has been remapped so dispatch it */

                blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
                                    tio->io->bio->bi_bdev->bd_dev, sector,
                                    clone->bi_sector);

                generic_make_request(clone);
        } else if (r < 0) {
                /* error the io and bail out */
                struct dm_io *io = tio->io;
                free_tio(tio->io->md, tio);
                dec_pending(io, r);
                bio_put(clone);
        }
}
struct clone_info {
        struct mapped_device *md;
        struct dm_table *map;
        struct bio *bio;
        struct dm_io *io;
        sector_t sector;
        sector_t sector_count;
        unsigned short idx;
};
static void dm_bio_destructor(struct bio *bio)
{
        bio_free(bio, dm_set);
}
/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
                              unsigned short idx, unsigned int offset,
                              unsigned int len)
{
        struct bio *clone;
        struct bio_vec *bv = bio->bi_io_vec + idx;

        clone = bio_alloc_bioset(GFP_NOIO, 1, dm_set);
        clone->bi_destructor = dm_bio_destructor;
        *clone->bi_io_vec = *bv;

        clone->bi_sector = sector;
        clone->bi_bdev = bio->bi_bdev;
        clone->bi_rw = bio->bi_rw;
        clone->bi_vcnt = 1;
        clone->bi_size = to_bytes(len);
        clone->bi_io_vec->bv_offset = offset;
        clone->bi_io_vec->bv_len = clone->bi_size;

        return clone;
}
/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
                             unsigned short idx, unsigned short bv_count,
                             unsigned int len)
{
        struct bio *clone;

        clone = bio_clone(bio, GFP_NOIO);
        clone->bi_sector = sector;
        clone->bi_idx = idx;
        clone->bi_vcnt = idx + bv_count;
        clone->bi_size = to_bytes(len);
        clone->bi_flags &= ~(1 << BIO_SEG_VALID);

        return clone;
}
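/*
 * Clearing BIO_SEG_VALID above forces the block layer to recount the
 * clone's physical/hw segments: trimming bi_vcnt and bi_size
 * invalidates the segment counts that bio_clone copied from the
 * original bio.
 */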
static void __clone_and_map(struct clone_info *ci)
{
        struct bio *clone, *bio = ci->bio;
        struct dm_target *ti = dm_table_find_target(ci->map, ci->sector);
        sector_t len = 0, max = max_io_len(ci->md, ci->sector, ti);
        struct target_io *tio;

        /*
         * Allocate a target io object.
         */
        tio = alloc_tio(ci->md);
        tio->io = ci->io;
        tio->ti = ti;
        memset(&tio->info, 0, sizeof(tio->info));

        if (ci->sector_count <= max) {
                /*
                 * Optimise for the simple case where we can do all of
                 * the remaining io with a single clone.
                 */
                clone = clone_bio(bio, ci->sector, ci->idx,
                                  bio->bi_vcnt - ci->idx, ci->sector_count);
                __map_bio(ti, clone, tio);
                ci->sector_count = 0;

        } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
                /*
                 * There are some bvecs that don't span targets.
                 * Do as many of these as possible.
                 */
                int i;
                sector_t remaining = max;
                sector_t bv_len;

                for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
                        bv_len = to_sector(bio->bi_io_vec[i].bv_len);

                        if (bv_len > remaining)
                                break;

                        remaining -= bv_len;
                        len += bv_len;
                }

                clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len);
                __map_bio(ti, clone, tio);

                ci->sector += len;
                ci->sector_count -= len;
                ci->idx = i;

        } else {
                /*
                 * Handle a bvec that must be split between two or more targets.
                 */
                struct bio_vec *bv = bio->bi_io_vec + ci->idx;
                sector_t remaining = to_sector(bv->bv_len);
                unsigned int offset = 0;

                do {
                        if (offset) {
                                ti = dm_table_find_target(ci->map, ci->sector);
                                max = max_io_len(ci->md, ci->sector, ti);

                                tio = alloc_tio(ci->md);
                                tio->io = ci->io;
                                tio->ti = ti;
                                memset(&tio->info, 0, sizeof(tio->info));
                        }

                        len = min(remaining, max);

                        clone = split_bvec(bio, ci->sector, ci->idx,
                                           bv->bv_offset + offset, len);

                        __map_bio(ti, clone, tio);

                        ci->sector += len;
                        ci->sector_count -= len;
                        offset += to_bytes(len);
                } while (remaining -= len);

                ci->idx++;
        }
}
/*
 * Split the bio into several clones.
 */
static void __split_bio(struct mapped_device *md, struct bio *bio)
{
        struct clone_info ci;

        ci.map = dm_get_table(md);
        if (!ci.map) {
                bio_io_error(bio, bio->bi_size);
                return;
        }

        ci.md = md;
        ci.bio = bio;
        ci.io = alloc_io(md);
        ci.io->error = 0;
        atomic_set(&ci.io->io_count, 1);
        ci.io->bio = bio;
        ci.io->md = md;
        ci.sector = bio->bi_sector;
        ci.sector_count = bio_sectors(bio);
        ci.idx = bio->bi_idx;

        start_io_acct(ci.io);
        while (ci.sector_count)
                __clone_and_map(&ci);

        /* drop the extra reference count */
        dec_pending(ci.io, 0);
        dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 * the splitting code above.
 */
static int dm_request(request_queue_t *q, struct bio *bio)
{
        int r;
        int rw = bio_data_dir(bio);
        struct mapped_device *md = q->queuedata;

        down_read(&md->io_lock);

        disk_stat_inc(dm_disk(md), ios[rw]);
        disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio));

        /*
         * If we're suspended we have to queue
         * this io for later.
         */
        while (test_bit(DMF_BLOCK_IO, &md->flags)) {
                up_read(&md->io_lock);

                if (bio_rw(bio) == READA) {
                        bio_io_error(bio, bio->bi_size);
                        return 0;
                }

                r = queue_io(md, bio);
                if (r < 0) {
                        bio_io_error(bio, bio->bi_size);
                        return 0;

                } else if (r == 0)
                        return 0;       /* deferred successfully */

                /*
                 * We're in a while loop, because someone could suspend
                 * before we get to the following read lock.
                 */
                down_read(&md->io_lock);
        }

        __split_bio(md, bio);
        up_read(&md->io_lock);
        return 0;
}
static int dm_flush_all(request_queue_t *q, struct gendisk *disk,
                        sector_t *error_sector)
{
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);
        int ret = -ENXIO;

        if (map) {
                ret = dm_table_flush_all(map);
                dm_table_put(map);
        }

        return ret;
}
static void dm_unplug_all(request_queue_t *q)
{
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_table(md);

        if (map) {
                dm_table_unplug_all(map);
                dm_table_put(map);
        }
}
static int dm_any_congested(void *congested_data, int bdi_bits)
{
        int r;
        struct mapped_device *md = (struct mapped_device *) congested_data;
        struct dm_table *map = dm_get_table(md);

        if (!map || test_bit(DMF_BLOCK_IO, &md->flags))
                r = bdi_bits;
        else
                r = dm_table_any_congested(map, bdi_bits);

        dm_table_put(map);
        return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);
static void free_minor(int minor)
{
        spin_lock(&_minor_lock);
        idr_remove(&_minor_idr, minor);
        spin_unlock(&_minor_lock);
}
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(struct mapped_device *md, int minor)
{
        int r, m;

        if (minor >= (1 << MINORBITS))
                return -EINVAL;

        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
        if (!r)
                return -ENOMEM;

        spin_lock(&_minor_lock);

        if (idr_find(&_minor_idr, minor)) {
                r = -EBUSY;
                goto out;
        }

        r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
        if (r)
                goto out;

        if (m != minor) {
                idr_remove(&_minor_idr, m);
                r = -EBUSY;
        }

out:
        spin_unlock(&_minor_lock);
        return r;
}
static int next_free_minor(struct mapped_device *md, int *minor)
{
        int r, m;

        r = idr_pre_get(&_minor_idr, GFP_KERNEL);
        if (!r)
                return -ENOMEM;

        spin_lock(&_minor_lock);

        r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
        if (r)
                goto out;

        if (m >= (1 << MINORBITS)) {
                idr_remove(&_minor_idr, m);
                r = -ENOSPC;
                goto out;
        }

        *minor = m;

out:
        spin_unlock(&_minor_lock);
        return r;
}
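/*
 * Both allocators above insert MINOR_ALLOCED, a placeholder, into the
 * idr rather than the md pointer: the device is not fully constructed
 * yet, so the placeholder reserves the minor while alloc_dev() finishes
 * and is then swapped for the real md by idr_replace().  dm_find_md()
 * must therefore treat a MINOR_ALLOCED entry as "no device yet".
 */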
static struct block_device_operations dm_blk_dops;
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
        int r;
        struct mapped_device *md = kmalloc(sizeof(*md), GFP_KERNEL);
        void *old_md;

        if (!md) {
                DMWARN("unable to allocate device, out of memory.");
                return NULL;
        }

        if (!try_module_get(THIS_MODULE))
                goto bad0;

        /* get a minor number for the dev */
        if (minor == DM_ANY_MINOR)
                r = next_free_minor(md, &minor);
        else
                r = specific_minor(md, minor);
        if (r < 0)
                goto bad1;

        memset(md, 0, sizeof(*md));
        init_rwsem(&md->io_lock);
        init_MUTEX(&md->suspend_lock);
        rwlock_init(&md->map_lock);
        atomic_set(&md->holders, 1);
        atomic_set(&md->event_nr, 0);

        md->queue = blk_alloc_queue(GFP_KERNEL);
        if (!md->queue)
                goto bad1_free_minor;

        md->queue->queuedata = md;
        md->queue->backing_dev_info.congested_fn = dm_any_congested;
        md->queue->backing_dev_info.congested_data = md;
        blk_queue_make_request(md->queue, dm_request);
        blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
        md->queue->unplug_fn = dm_unplug_all;
        md->queue->issue_flush_fn = dm_flush_all;

        md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
        if (!md->io_pool)
                goto bad2;

        md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
        if (!md->tio_pool)
                goto bad3;

        md->disk = alloc_disk(1);
        if (!md->disk)
                goto bad4;

        atomic_set(&md->pending, 0);
        init_waitqueue_head(&md->wait);
        init_waitqueue_head(&md->eventq);

        md->disk->major = _major;
        md->disk->first_minor = minor;
        md->disk->fops = &dm_blk_dops;
        md->disk->queue = md->queue;
        md->disk->private_data = md;
        sprintf(md->disk->disk_name, "dm-%d", minor);
        add_disk(md->disk);
        format_dev_t(md->name, MKDEV(_major, minor));

        /* Populate the mapping, nobody knows we exist yet */
        spin_lock(&_minor_lock);
        old_md = idr_replace(&_minor_idr, md, minor);
        spin_unlock(&_minor_lock);

        BUG_ON(old_md != MINOR_ALLOCED);

        return md;

 bad4:
        mempool_destroy(md->tio_pool);
 bad3:
        mempool_destroy(md->io_pool);
 bad2:
        blk_cleanup_queue(md->queue);
 bad1_free_minor:
        free_minor(minor);
 bad1:
        module_put(THIS_MODULE);
 bad0:
        kfree(md);
        return NULL;
}
static void free_dev(struct mapped_device *md)
{
        int minor = md->disk->first_minor;

        if (md->suspended_bdev) {
                thaw_bdev(md->suspended_bdev, NULL);
                bdput(md->suspended_bdev);
        }
        mempool_destroy(md->tio_pool);
        mempool_destroy(md->io_pool);
        del_gendisk(md->disk);
        free_minor(minor);

        spin_lock(&_minor_lock);
        md->disk->private_data = NULL;
        spin_unlock(&_minor_lock);

        put_disk(md->disk);
        blk_cleanup_queue(md->queue);
        module_put(THIS_MODULE);
        kfree(md);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
        struct mapped_device *md = (struct mapped_device *) context;

        atomic_inc(&md->event_nr);
        wake_up(&md->eventq);
}
static void __set_size(struct mapped_device *md, sector_t size)
{
        set_capacity(md->disk, size);

        mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
        i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
        mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}
static int __bind(struct mapped_device *md, struct dm_table *t)
{
        request_queue_t *q = md->queue;
        sector_t size;

        size = dm_table_get_size(t);

        /*
         * Wipe any geometry if the size of the table changed.
         */
        if (size != get_capacity(md->disk))
                memset(&md->geometry, 0, sizeof(md->geometry));

        __set_size(md, size);
        if (size == 0)
                return 0;

        dm_table_get(t);
        dm_table_event_callback(t, event_callback, md);

        write_lock(&md->map_lock);
        md->map = t;
        dm_table_set_restrictions(t, q);
        write_unlock(&md->map_lock);

        return 0;
}
static void __unbind(struct mapped_device *md)
{
        struct dm_table *map = md->map;

        if (!map)
                return;

        dm_table_event_callback(map, NULL, NULL);
        write_lock(&md->map_lock);
        md->map = NULL;
        write_unlock(&md->map_lock);

        dm_table_put(map);
}
/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
        struct mapped_device *md;

        md = alloc_dev(minor);
        if (!md)
                return -ENXIO;

        *result = md;
        return 0;
}
static struct mapped_device *dm_find_md(dev_t dev)
{
        struct mapped_device *md;
        unsigned minor = MINOR(dev);

        if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
                return NULL;

        spin_lock(&_minor_lock);

        md = idr_find(&_minor_idr, minor);
        if (md && (md == MINOR_ALLOCED ||
                   (dm_disk(md)->first_minor != minor) ||
                   test_bit(DMF_FREEING, &md->flags))) {
                md = NULL;
                goto out;
        }

out:
        spin_unlock(&_minor_lock);

        return md;
}
struct mapped_device *dm_get_md(dev_t dev)
{
        struct mapped_device *md = dm_find_md(dev);

        if (md)
                dm_get(md);

        return md;
}
void *dm_get_mdptr(struct mapped_device *md)
{
        return md->interface_ptr;
}
void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
        md->interface_ptr = ptr;
}
void dm_get(struct mapped_device *md)
{
        atomic_inc(&md->holders);
}
void dm_put(struct mapped_device *md)
{
        struct dm_table *map;

        BUG_ON(test_bit(DMF_FREEING, &md->flags));

        if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
                map = dm_get_table(md);
                idr_replace(&_minor_idr, MINOR_ALLOCED, dm_disk(md)->first_minor);
                set_bit(DMF_FREEING, &md->flags);
                spin_unlock(&_minor_lock);
                if (!dm_suspended(md)) {
                        dm_table_presuspend_targets(map);
                        dm_table_postsuspend_targets(map);
                }
                __unbind(md);
                dm_table_put(map);
                free_dev(md);
        }
}
/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md, struct bio *c)
{
        struct bio *n;

        while (c) {
                n = c->bi_next;
                c->bi_next = NULL;
                __split_bio(md, c);
                c = n;
        }
}
/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
        int r = -EINVAL;

        down(&md->suspend_lock);

        /* device must be suspended */
        if (!dm_suspended(md))
                goto out;

        __unbind(md);
        r = __bind(md, table);

out:
        up(&md->suspend_lock);
        return r;
}
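/*
 * Illustrative caller sequence (hypothetical, not from this file):
 *
 *      dm_suspend(md, 1);              // defer new io, wait for in-flight io
 *      dm_swap_table(md, new_table);   // only legal while suspended
 *      dm_resume(md);                  // replay the deferred bios
 *
 * This is roughly the sequence driven from the ioctl interface when a
 * new table is loaded and the device resumed.
 */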
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
        int r;

        WARN_ON(md->frozen_sb);

        md->frozen_sb = freeze_bdev(md->suspended_bdev);
        if (IS_ERR(md->frozen_sb)) {
                r = PTR_ERR(md->frozen_sb);
                md->frozen_sb = NULL;
                return r;
        }

        set_bit(DMF_FROZEN, &md->flags);

        /* don't bdput right now, we don't want the bdev
         * to go away while it is locked.
         */
        return 0;
}
static void unlock_fs(struct mapped_device *md)
{
        if (!test_bit(DMF_FROZEN, &md->flags))
                return;

        thaw_bdev(md->suspended_bdev, md->frozen_sb);
        md->frozen_sb = NULL;
        clear_bit(DMF_FROZEN, &md->flags);
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, int do_lockfs)
{
        struct dm_table *map = NULL;
        DECLARE_WAITQUEUE(wait, current);
        struct bio *def;
        int r = -EINVAL;

        down(&md->suspend_lock);

        if (dm_suspended(md))
                goto out;

        map = dm_get_table(md);

        /* This does not get reverted if there's an error later. */
        dm_table_presuspend_targets(map);

        md->suspended_bdev = bdget_disk(md->disk, 0);
        if (!md->suspended_bdev) {
                DMWARN("bdget failed in dm_suspend");
                r = -ENOMEM;
                goto out;
        }

        /* Flush I/O to the device. */
        if (do_lockfs) {
                r = lock_fs(md);
                if (r)
                        goto out;
        }

        /*
         * First we set the BLOCK_IO flag so no more ios will be mapped.
         */
        down_write(&md->io_lock);
        set_bit(DMF_BLOCK_IO, &md->flags);

        add_wait_queue(&md->wait, &wait);
        up_write(&md->io_lock);

        /* unplug */
        if (map)
                dm_table_unplug_all(map);

        /*
         * Then we wait for the already mapped ios to
         * complete.
         */
        while (1) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (!atomic_read(&md->pending) || signal_pending(current))
                        break;

                io_schedule();
        }
        set_current_state(TASK_RUNNING);

        down_write(&md->io_lock);
        remove_wait_queue(&md->wait, &wait);

        /* were we interrupted ? */
        r = -EINTR;
        if (atomic_read(&md->pending)) {
                clear_bit(DMF_BLOCK_IO, &md->flags);
                def = bio_list_get(&md->deferred);
                __flush_deferred_io(md, def);
                up_write(&md->io_lock);
                unlock_fs(md);
                goto out;
        }
        up_write(&md->io_lock);

        dm_table_postsuspend_targets(map);

        set_bit(DMF_SUSPENDED, &md->flags);

        r = 0;

out:
        if (r && md->suspended_bdev) {
                bdput(md->suspended_bdev);
                md->suspended_bdev = NULL;
        }

        dm_table_put(map);
        up(&md->suspend_lock);
        return r;
}
int dm_resume(struct mapped_device *md)
{
        int r = -EINVAL;
        struct bio *def;
        struct dm_table *map = NULL;

        down(&md->suspend_lock);
        if (!dm_suspended(md))
                goto out;

        map = dm_get_table(md);
        if (!map || !dm_table_get_size(map))
                goto out;

        dm_table_resume_targets(map);

        down_write(&md->io_lock);
        clear_bit(DMF_BLOCK_IO, &md->flags);

        def = bio_list_get(&md->deferred);
        __flush_deferred_io(md, def);
        up_write(&md->io_lock);

        unlock_fs(md);

        bdput(md->suspended_bdev);
        md->suspended_bdev = NULL;

        clear_bit(DMF_SUSPENDED, &md->flags);

        dm_table_unplug_all(map);

        r = 0;

out:
        dm_table_put(map);
        up(&md->suspend_lock);

        return r;
}
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
uint32_t dm_get_event_nr(struct mapped_device *md)
{
        return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
        return wait_event_interruptible(md->eventq,
                        (event_nr != atomic_read(&md->event_nr)));
}
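/*
 * A caller typically records the current counter with
 * dm_get_event_nr() and later sleeps in dm_wait_event() with that
 * value; any table event in between increments event_nr and wakes
 * eventq, so the inequality check cannot miss an event that fired
 * before the sleep began.
 */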
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
        return md->disk;
}
int dm_suspended(struct mapped_device *md)
{
        return test_bit(DMF_SUSPENDED, &md->flags);
}
static struct block_device_operations dm_blk_dops = {
        .open = dm_blk_open,
        .release = dm_blk_close,
        .getgeo = dm_blk_getgeo,
        .owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");