/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-exception-store.h"
#define DM_MSG_PREFIX "snapshots"

static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
        ((ti)->type->name == dm_snapshot_merge_target_name)

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE      16
#define DM_TRACKED_CHUNK_HASH(x)        ((unsigned long)(x) & \
                                         (DM_TRACKED_CHUNK_HASH_SIZE - 1))
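
/*
 * Illustrative example (not part of the original source): with
 * DM_TRACKED_CHUNK_HASH_SIZE == 16 the hash simply keeps the low four
 * bits of the chunk number, so chunk 37 (0x25) lands in bucket 5.
 */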
struct dm_exception_table {
        uint32_t hash_mask;
        unsigned hash_shift;
        struct list_head *table;
};

struct dm_snapshot {
        struct rw_semaphore lock;

        struct dm_dev *origin;
        struct dm_dev *cow;

        struct dm_target *ti;

        /* List of snapshots per Origin */
        struct list_head list;

        /*
         * You can't use a snapshot if this is 0 (e.g. if full).
         * A snapshot-merge target never clears this.
         */
        int valid;

        /* Origin writes don't trigger exceptions until this is set */
        int active;

        atomic_t pending_exceptions_count;

        mempool_t *pending_pool;

        struct dm_exception_table pending;
        struct dm_exception_table complete;

        /*
         * pe_lock protects all pending_exception operations and access
         * as well as the snapshot_bios list.
         */
        spinlock_t pe_lock;

        /* Chunks with outstanding reads */
        spinlock_t tracked_chunk_lock;
        mempool_t *tracked_chunk_pool;
        struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];

        /* The on disk metadata handler */
        struct dm_exception_store *store;

        struct dm_kcopyd_client *kcopyd_client;

        /* Wait for events based on state_bits */
        unsigned long state_bits;

        /* Range of chunks currently being merged. */
        chunk_t first_merging_chunk;
        int num_merging_chunks;

        /*
         * The merge operation failed if this flag is set.
         * Failure modes are handled as follows:
         * - I/O error reading the header
         *   => don't load the target; abort.
         * - Header does not have "valid" flag set
         *   => use the origin; forget about the snapshot.
         * - I/O error when reading exceptions
         *   => don't load the target; abort.
         *   (We can't use the intermediate origin state.)
         * - I/O error while merging
         *   => stop merging; set merge_failed; process I/O normally.
         */
        int merge_failed;

        /*
         * Incoming bios that overlap with chunks being merged must wait
         * for them to be committed.
         */
        struct bio_list bios_queued_during_merge;
};
/*
 * RUNNING_MERGE  - Merge operation is in progress.
 * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                  cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1

struct dm_dev *dm_snap_origin(struct dm_snapshot *s)
{
        return s->origin;
}
EXPORT_SYMBOL(dm_snap_origin);

struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
        return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static sector_t chunk_to_sector(struct dm_exception_store *store,
                                chunk_t chunk)
{
        return chunk << store->chunk_shift;
}
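
/*
 * Worked example (illustrative, not from the original source): with an
 * 8KiB chunk the store holds 16 sectors per chunk, so chunk_shift is 4
 * and chunk_to_sector(store, 5) yields sector 5 << 4 = 80.
 */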
static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
        /*
         * There is only ever one instance of a particular block
         * device so we can compare pointers safely.
         */
        return lhs == rhs;
}

struct dm_snap_pending_exception {
        struct dm_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
        struct hlist_node node;
        chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;
static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
                                                 chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
                                                        GFP_NOIO);
        unsigned long flags;

        c->chunk = chunk;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_add_head(&c->node,
                       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
                                struct dm_snap_tracked_chunk *c)
{
        unsigned long flags;

        spin_lock_irqsave(&s->tracked_chunk_lock, flags);
        hlist_del(&c->node);
        spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

        mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_snap_tracked_chunk *c;
        struct hlist_node *hn;
        int found = 0;

        spin_lock_irq(&s->tracked_chunk_lock);

        hlist_for_each_entry(c, hn,
            &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
                if (c->chunk == chunk) {
                        found = 1;
                        break;
                }
        }

        spin_unlock_irq(&s->tracked_chunk_lock);

        return found;
}

/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
        while (__chunk_is_tracked(s, chunk))
                msleep(1);
}
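
/*
 * Illustrative note (not from the original source): snapshot reads that
 * still go to the origin are registered with track_chunk() and removed
 * again in snapshot_end_io().  pending_complete() and the merge path use
 * __check_for_conflicting_io() to wait for those bios, so a chunk is not
 * switched over while I/O depending on the old mapping is in flight.
 */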
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;

static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);
        init_rwsem(&_origins_lock);

        return 0;
}

static void exit_origin_hash(void)
{
        kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}

/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 * There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
                                        struct dm_snapshot **snap_src,
                                        struct dm_snapshot **snap_dest,
                                        struct dm_snapshot **snap_merge)
{
        struct dm_snapshot *s;
        struct origin *o;
        int count = 0;
        int active;

        o = __lookup_origin(snap->origin->bdev);
        if (!o)
                goto out;

        list_for_each_entry(s, &o->snapshots, list) {
                if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
                        *snap_merge = s;
                if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
                        continue;

                down_read(&s->lock);
                active = s->active;
                up_read(&s->lock);

                if (active) {
                        if (snap_src)
                                *snap_src = s;
                } else if (snap_dest)
                        *snap_dest = s;

                count++;
        }

out:
        return count;
}
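
/*
 * Illustrative scenario (not from the original source): while a live
 * snapshot is suspended, loading a second target that names the same
 * COW device makes this function return 2 with snap_src set to the old
 * instance and snap_dest to the new one; snapshot_resume() then hands
 * the exception tables across.
 */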
/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
        struct dm_snapshot *snap_merge = NULL;

        /* Does snapshot need exceptions handed over to it? */
        if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
                                          &snap_merge) == 2) ||
            snap_dest) {
                snap->ti->error = "Snapshot cow pairing for exception "
                                  "table handover failed";
                return -EINVAL;
        }

        /*
         * If no snap_src was found, snap cannot become a handover
         * destination.
         */
        if (!snap_src)
                return 0;

        /*
         * Non-snapshot-merge handover?
         */
        if (!dm_target_is_snapshot_merge(snap->ti))
                return 1;

        /*
         * Do not allow more than one merging snapshot.
         */
        if (snap_merge) {
                snap->ti->error = "A snapshot is already merging.";
                return -EINVAL;
        }

        if (!snap_src->store->type->prepare_merge ||
            !snap_src->store->type->commit_merge) {
                snap->ti->error = "Snapshot exception store does not "
                                  "support snapshot-merge.";
                return -EINVAL;
        }

        return 1;
}

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
        struct dm_snapshot *l;

        /* Sort the list according to chunk size, largest-first smallest-last */
        list_for_each_entry(l, &o->snapshots, list)
                if (l->store->chunk_size < s->store->chunk_size)
                        break;
        list_add_tail(&s->list, &l->list);
}
/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct origin *o, *new_o = NULL;
        struct block_device *bdev = snap->origin->bdev;
        int r = 0;

        new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
        if (!new_o)
                return -ENOMEM;

        down_write(&_origins_lock);

        r = __validate_exception_handover(snap);
        if (r < 0) {
                kfree(new_o);
                goto out;
        }

        o = __lookup_origin(bdev);
        if (o)
                kfree(new_o);
        else {
                /* New origin */
                o = new_o;

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        __insert_snapshot(o, snap);

out:
        up_write(&_origins_lock);
        return r;
}

/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
        struct block_device *bdev = s->origin->bdev;

        down_write(&_origins_lock);

        list_del(&s->list);
        __insert_snapshot(__lookup_origin(bdev), s);

        up_write(&_origins_lock);
}

static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (o && list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}
/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
                                   uint32_t size, unsigned hash_shift)
{
        unsigned int i;

        et->hash_shift = hash_shift;
        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}
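
/*
 * Example (illustrative, not from the original source): the completed
 * table is created with hash_shift == DM_CHUNK_CONSECUTIVE_BITS, so a
 * run of consecutive chunks shares one bucket and can be folded into a
 * single dm_exception by dm_insert_exception() below.  The pending
 * table is created with hash_shift 0 and never groups entries.
 */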
static void dm_exception_table_exit(struct dm_exception_table *et,
                                    struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}

static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
        return (chunk >> et->hash_shift) & et->hash_mask;
}

static void dm_remove_exception(struct dm_exception *e)
{
        list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
                                                chunk_t chunk)
{
        struct list_head *slot;
        struct dm_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (chunk >= e->old_chunk &&
                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                        return e;

        return NULL;
}

static struct dm_exception *alloc_completed_exception(void)
{
        struct dm_exception *e;

        e = kmem_cache_alloc(exception_cache, GFP_NOIO);
        if (!e)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}

static void free_completed_exception(struct dm_exception *e)
{
        kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
        struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
                                                             GFP_NOIO);

        atomic_inc(&s->pending_exceptions_count);
        pe->snap = s;

        return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;

        mempool_free(pe, s->pending_pool);
        smp_mb__before_atomic_dec();
        atomic_dec(&s->pending_exceptions_count);
}
static void dm_insert_exception(struct dm_exception_table *eh,
                                struct dm_exception *new_e)
{
        struct list_head *l;
        struct dm_exception *e = NULL;

        l = &eh->table[exception_hash(eh, new_e->old_chunk)];

        /* Add immediately if this table doesn't support consecutive chunks */
        if (!eh->hash_shift)
                goto out;

        /* List is ordered by old_chunk */
        list_for_each_entry_reverse(e, l, hash_list) {
                /* Insert after an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk +
                                         dm_consecutive_chunk_count(e) + 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                         dm_consecutive_chunk_count(e) + 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        free_completed_exception(new_e);
                        return;
                }

                /* Insert before an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk - 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        e->old_chunk--;
                        e->new_chunk--;
                        free_completed_exception(new_e);
                        return;
                }

                if (new_e->old_chunk > e->old_chunk)
                        break;
        }

out:
        list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
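
/*
 * Worked example (not from the original source): with an existing
 * exception e mapping old chunks 10-12 to new chunks 20-22
 * (dm_consecutive_chunk_count(e) == 2), inserting old_chunk 13 /
 * new_chunk 23 matches the "insert after" test (10 + 2 + 1 == 13 and
 * 20 + 2 + 1 == 23), so the count is bumped to 3 and new_e is freed
 * instead of being linked into the slot.
 */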
/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
        struct dm_snapshot *s = context;
        struct dm_exception *e;

        e = alloc_completed_exception();
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;

        /* Consecutive_count is implicitly initialised to zero */
        e->new_chunk = new;

        dm_insert_exception(&s->complete, e);

        return 0;
}

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static sector_t __minimum_chunk_size(struct origin *o)
{
        struct dm_snapshot *snap;
        unsigned chunk_size = 0;

        if (o)
                list_for_each_entry(snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size,
                                                  snap->store->chunk_size);

        return (sector_t) chunk_size;
}

static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;
        mem /= sizeof(struct list_head);

        return mem;
}
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
        sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
        origin_dev_size = get_dev_size(s->origin->bdev);
        max_buckets = calc_max_buckets();

        hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
        hash_size = min(hash_size, max_buckets);

        if (hash_size < 64)
                hash_size = 64;
        hash_size = rounddown_pow_of_two(hash_size);
        if (dm_exception_table_init(&s->complete, hash_size,
                                    DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (dm_exception_table_init(&s->pending, hash_size, 0)) {
                dm_exception_table_exit(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}
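
/*
 * Sizing example (illustrative, not from the original source): a 64GiB
 * origin with 16-sector chunks has 2^23 chunks, but max_buckets caps
 * the table at 2MiB / sizeof(struct list_head) buckets (131072 on
 * 64-bit), so the completed table gets 131072 buckets and the pending
 * table an eighth of that.
 */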
static void merge_shutdown(struct dm_snapshot *s)
{
        clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
        smp_mb__after_clear_bit();
        wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;

        return bio_list_get(&s->bios_queued_during_merge);
}

/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
                                           chunk_t old_chunk)
{
        struct dm_exception *e;

        e = dm_lookup_exception(&s->complete, old_chunk);
        if (!e) {
                DMERR("Corruption detected: exception for block %llu is "
                      "on disk but not in memory",
                      (unsigned long long)old_chunk);
                return -EINVAL;
        }

        /*
         * If this is the only chunk using this exception, remove exception.
         */
        if (!dm_consecutive_chunk_count(e)) {
                dm_remove_exception(e);
                free_completed_exception(e);
                return 0;
        }

        /*
         * The chunk may be either at the beginning or the end of a
         * group of consecutive chunks - never in the middle.  We are
         * removing chunks in the opposite order to that in which they
         * were added, so this should always be true.
         * Decrement the consecutive chunk counter and adjust the
         * starting point if necessary.
         */
        if (old_chunk == e->old_chunk) {
                e->old_chunk++;
                e->new_chunk++;
        } else if (old_chunk != e->old_chunk +
                   dm_consecutive_chunk_count(e)) {
                DMERR("Attempt to merge block %llu from the "
                      "middle of a chunk range [%llu - %llu]",
                      (unsigned long long)old_chunk,
                      (unsigned long long)e->old_chunk,
                      (unsigned long long)
                      e->old_chunk + dm_consecutive_chunk_count(e));
                return -EINVAL;
        }

        dm_consecutive_chunk_count_dec(e);

        return 0;
}

static void flush_bios(struct bio *bio);

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
        struct bio *b = NULL;
        int r;
        chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

        down_write(&s->lock);

        /*
         * Process chunks (and associated exceptions) in reverse order
         * so that dm_consecutive_chunk_count_dec() accounting works.
         */
        do {
                r = __remove_single_exception_chunk(s, old_chunk);
                if (r)
                        goto out;
        } while (old_chunk-- > s->first_merging_chunk);

        b = __release_queued_bios_after_merge(s);

out:
        up_write(&s->lock);
        if (b)
                flush_bios(b);

        return r;
}

static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
                           void *context);

static uint64_t read_pending_exceptions_done_count(void)
{
        uint64_t pending_exceptions_done;

        spin_lock(&_pending_exceptions_done_spinlock);
        pending_exceptions_done = _pending_exceptions_done_count;
        spin_unlock(&_pending_exceptions_done_spinlock);

        return pending_exceptions_done;
}

static void increment_pending_exceptions_done_count(void)
{
        spin_lock(&_pending_exceptions_done_spinlock);
        _pending_exceptions_done_count++;
        spin_unlock(&_pending_exceptions_done_spinlock);

        wake_up_all(&_pending_exceptions_done);
}
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
        int i, linear_chunks;
        chunk_t old_chunk, new_chunk;
        struct dm_io_region src, dest;
        sector_t io_size;
        uint64_t previous_count;

        BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
        if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
                goto shut;

        /*
         * valid flag never changes during merge, so no lock required.
         */
        if (!s->valid) {
                DMERR("Snapshot is invalid: can't merge");
                goto shut;
        }

        linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
                                                      &new_chunk);
        if (linear_chunks <= 0) {
                if (linear_chunks < 0) {
                        DMERR("Read error in exception store: "
                              "shutting down merge");
                        down_write(&s->lock);
                        s->merge_failed = 1;
                        up_write(&s->lock);
                }
                goto shut;
        }

        /* Adjust old_chunk and new_chunk to reflect start of linear region */
        old_chunk = old_chunk + 1 - linear_chunks;
        new_chunk = new_chunk + 1 - linear_chunks;

        /*
         * Use one (potentially large) I/O to copy all 'linear_chunks'
         * from the exception store to the origin
         */
        io_size = linear_chunks * s->store->chunk_size;

        dest.bdev = s->origin->bdev;
        dest.sector = chunk_to_sector(s->store, old_chunk);
        dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

        src.bdev = s->cow->bdev;
        src.sector = chunk_to_sector(s->store, new_chunk);
        src.count = dest.count;

        /*
         * Reallocate any exceptions needed in other snapshots then
         * wait for the pending exceptions to complete.
         * Each time any pending exception (globally on the system)
         * completes we are woken and repeat the process to find out
         * if we can proceed.  While this may not seem a particularly
         * efficient algorithm, it is not expected to have any
         * significant impact on performance.
         */
        previous_count = read_pending_exceptions_done_count();
        while (origin_write_extent(s, dest.sector, io_size)) {
                wait_event(_pending_exceptions_done,
                           (read_pending_exceptions_done_count() !=
                            previous_count));
                /* Retry after the wait, until all exceptions are done. */
                previous_count = read_pending_exceptions_done_count();
        }

        down_write(&s->lock);
        s->first_merging_chunk = old_chunk;
        s->num_merging_chunks = linear_chunks;
        up_write(&s->lock);

        /* Wait until writes to all 'linear_chunks' drain */
        for (i = 0; i < linear_chunks; i++)
                __check_for_conflicting_io(s, old_chunk + i);

        dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
        return;

shut:
        merge_shutdown(s);
}
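
/*
 * Illustrative walk-through (not from the original source):
 * prepare_merge() reports the highest chunk of a linear run found while
 * working back from the end of the exception store, so old_chunk == 100
 * with linear_chunks == 8 means chunks 93-100 are copied back to the
 * origin in a single kcopyd I/O, then committed and unindexed by
 * merge_callback().
 */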
static void error_bios(struct bio *bio);

static void merge_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snapshot *s = context;
        struct bio *b = NULL;

        if (read_err || write_err) {
                if (read_err)
                        DMERR("Read error: shutting down merge.");
                else
                        DMERR("Write error: shutting down merge.");
                goto shut;
        }

        if (s->store->type->commit_merge(s->store,
                                         s->num_merging_chunks) < 0) {
                DMERR("Write error in exception store: shutting down merge");
                goto shut;
        }

        if (remove_single_exception_chunk(s) < 0)
                goto shut;

        snapshot_merge_next_chunks(s);

        return;

shut:
        down_write(&s->lock);
        s->merge_failed = 1;
        b = __release_queued_bios_after_merge(s);
        up_write(&s->lock);
        error_bios(b);

        merge_shutdown(s);
}

static void start_merge(struct dm_snapshot *s)
{
        if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
                snapshot_merge_next_chunks(s);
}

static int wait_schedule(void *ptr)
{
        schedule();

        return 0;
}

/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
        set_bit(SHUTDOWN_MERGE, &s->state_bits);
        wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
                    TASK_UNINTERRUPTIBLE);
        clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_snapshot *s;
        int i;
        int r = -EINVAL;
        char *origin_path, *cow_path;
        unsigned args_used, num_flush_requests = 1;
        fmode_t origin_mode = FMODE_READ;

        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
                r = -EINVAL;
                goto bad;
        }

        if (dm_target_is_snapshot_merge(ti)) {
                num_flush_requests = 2;
                origin_mode = FMODE_WRITE;
        }

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s) {
                ti->error = "Cannot allocate snapshot context private "
                            "structure";
                r = -ENOMEM;
                goto bad;
        }

        origin_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
        if (r) {
                ti->error = "Cannot get origin device";
                goto bad_origin;
        }

        cow_path = argv[0];
        argv++;
        argc--;

        r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow);
        if (r) {
                ti->error = "Cannot get COW device";
                goto bad_cow;
        }

        r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
        if (r) {
                ti->error = "Couldn't create exception store";
                r = -EINVAL;
                goto bad_store;
        }

        argv += args_used;
        argc -= args_used;

        s->ti = ti;
        s->valid = 1;
        s->active = 0;
        atomic_set(&s->pending_exceptions_count, 0);
        init_rwsem(&s->lock);
        INIT_LIST_HEAD(&s->list);
        spin_lock_init(&s->pe_lock);
        s->state_bits = 0;
        s->merge_failed = 0;
        s->first_merging_chunk = 0;
        s->num_merging_chunks = 0;
        bio_list_init(&s->bios_queued_during_merge);

        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
                ti->error = "Unable to allocate hash table space";
                r = -ENOMEM;
                goto bad_hash_tables;
        }

        s->kcopyd_client = dm_kcopyd_client_create();
        if (IS_ERR(s->kcopyd_client)) {
                r = PTR_ERR(s->kcopyd_client);
                ti->error = "Could not create kcopyd client";
                goto bad_kcopyd;
        }

        s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
        if (!s->pending_pool) {
                ti->error = "Could not allocate mempool for pending exceptions";
                goto bad_pending_pool;
        }

        s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
                                                         tracked_chunk_cache);
        if (!s->tracked_chunk_pool) {
                ti->error = "Could not allocate tracked_chunk mempool for "
                            "tracking reads";
                goto bad_tracked_chunk_pool;
        }

        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

        spin_lock_init(&s->tracked_chunk_lock);

        ti->private = s;
        ti->num_flush_requests = num_flush_requests;

        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        r = register_snapshot(s);
        if (r == -ENOMEM) {
                ti->error = "Snapshot origin struct allocation failed";
                goto bad_load_and_register;
        } else if (r < 0) {
                /* invalid handover, register_snapshot has set ti->error */
                goto bad_load_and_register;
        }

        /*
         * Metadata must only be loaded into one table at once, so skip this
         * if metadata will be handed over during resume.
         * Chunk size will be set during the handover - set it to zero to
         * ensure it's ignored.
         */
        if (r > 0) {
                s->store->chunk_size = 0;
                return 0;
        }

        r = s->store->type->read_metadata(s->store, dm_add_exception,
                                          (void *)s);
        if (r < 0) {
                ti->error = "Failed to read snapshot metadata";
                goto bad_read_metadata;
        } else if (r > 0) {
                s->valid = 0;
                DMWARN("Snapshot is marked invalid.");
        }

        if (!s->store->chunk_size) {
                ti->error = "Chunk size not set";
                goto bad_read_metadata;
        }
        ti->split_io = s->store->chunk_size;

        return 0;

bad_read_metadata:
        unregister_snapshot(s);

bad_load_and_register:
        mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
        mempool_destroy(s->pending_pool);

bad_pending_pool:
        dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
        dm_exception_store_destroy(s->store);

bad_store:
        dm_put_device(ti, s->cow);

bad_cow:
        dm_put_device(ti, s->origin);

bad_origin:
        kfree(s);

bad:
        return r;
}
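
/*
 * Illustrative table lines (device names and sizes are examples, not
 * from this file):
 *
 *   dmsetup create base --table "0 2097152 snapshot-origin /dev/vg/base"
 *   dmsetup create snap --table "0 2097152 snapshot /dev/vg/base /dev/vg/cow P 16"
 *
 * "P 16" selects a persistent exception store with 16-sector (8KiB)
 * chunks; "N" would select a transient one.
 */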
static void __free_exceptions(struct dm_snapshot *s)
{
        dm_kcopyd_client_destroy(s->kcopyd_client);
        s->kcopyd_client = NULL;

        dm_exception_table_exit(&s->pending, pending_cache);
        dm_exception_table_exit(&s->complete, exception_cache);
}

static void __handover_exceptions(struct dm_snapshot *snap_src,
                                  struct dm_snapshot *snap_dest)
{
        union {
                struct dm_exception_table table_swap;
                struct dm_exception_store *store_swap;
        } u;

        /*
         * Swap all snapshot context information between the two instances.
         */
        u.table_swap = snap_dest->complete;
        snap_dest->complete = snap_src->complete;
        snap_src->complete = u.table_swap;

        u.store_swap = snap_dest->store;
        snap_dest->store = snap_src->store;
        snap_src->store = u.store_swap;

        snap_dest->store->snap = snap_dest;
        snap_src->store->snap = snap_src;

        snap_dest->ti->split_io = snap_dest->store->chunk_size;
        snap_dest->valid = snap_src->valid;

        /*
         * Set source invalid to ensure it receives no further I/O.
         */
        snap_src->valid = 0;
}
static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
        int i;
#endif
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        /* Check whether exception handover must be cancelled */
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest && (s == snap_src)) {
                down_write(&snap_dest->lock);
                snap_dest->valid = 0;
                up_write(&snap_dest->lock);
                DMERR("Cancelling snapshot handover.");
        }
        up_read(&_origins_lock);

        if (dm_target_is_snapshot_merge(ti))
                stop_merge(s);

        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);

        while (atomic_read(&s->pending_exceptions_count))
                msleep(1);
        /*
         * Ensure instructions in mempool_destroy aren't reordered
         * before atomic_read.
         */
        smp_mb();

#ifdef CONFIG_DM_DEBUG
        for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
                BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

        mempool_destroy(s->tracked_chunk_pool);

        __free_exceptions(s);

        mempool_destroy(s->pending_pool);

        dm_exception_store_destroy(s->store);

        dm_put_device(ti, s->cow);

        dm_put_device(ti, s->origin);

        kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = n;
        }
}

static int do_origin(struct dm_dev *origin, struct bio *bio);

/*
 * Flush a list of buffers.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
        struct bio *n;
        int r;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                r = do_origin(s->origin, bio);
                if (r == DM_MAPIO_REMAPPED)
                        generic_make_request(bio);
                bio = n;
        }
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                bio_io_error(bio);
                bio = n;
        }
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
        if (!s->valid)
                return;

        if (err == -EIO)
                DMERR("Invalidating snapshot: Error reading/writing.");
        else if (err == -ENOMEM)
                DMERR("Invalidating snapshot: Unable to allocate exception.");

        if (s->store->type->drop_snapshot)
                s->store->type->drop_snapshot(s->store);

        s->valid = 0;

        dm_table_event(s->ti->table);
}
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
        struct dm_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
        int error = 0;

        if (!success) {
                /* Read/write error - snapshot is unusable */
                down_write(&s->lock);
                __invalidate_snapshot(s, -EIO);
                error = 1;
                goto out;
        }

        e = alloc_completed_exception();
        if (!e) {
                down_write(&s->lock);
                __invalidate_snapshot(s, -ENOMEM);
                error = 1;
                goto out;
        }
        *e = pe->e;

        down_write(&s->lock);
        if (!s->valid) {
                free_completed_exception(e);
                error = 1;
                goto out;
        }

        /* Check for conflicting reads */
        __check_for_conflicting_io(s, pe->e.old_chunk);

        /*
         * Add a proper exception, and remove the
         * in-flight exception from the list.
         */
        dm_insert_exception(&s->complete, e);

out:
        dm_remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = bio_list_get(&pe->origin_bios);
        free_pending_exception(pe);

        increment_pending_exceptions_done_count();

        up_write(&s->lock);

        /* Submit any pending write bios */
        if (error)
                error_bios(snapshot_bios);
        else
                flush_bios(snapshot_bios);

        retry_origin_bios(s, origin_bios);
}

static void commit_callback(void *context, int success)
{
        struct dm_snap_pending_exception *pe = context;

        pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snap_pending_exception *pe = context;
        struct dm_snapshot *s = pe->snap;

        if (read_err || write_err)
                pending_complete(pe, 0);

        else
                /* Update the metadata if we are persistent */
                s->store->type->commit_exception(s->store, &pe->e,
                                                 commit_callback, pe);
}
/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;
        struct dm_io_region src, dest;
        struct block_device *bdev = s->origin->bdev;
        sector_t dev_size;

        dev_size = get_dev_size(bdev);

        src.bdev = bdev;
        src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
        src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

        dest.bdev = s->cow->bdev;
        dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
        dest.count = src.count;

        /* Hand over to kcopyd */
        dm_kcopyd_copy(s->kcopyd_client,
                       &src, 1, &dest, 0, copy_callback, pe);
}
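
/*
 * Boundary example (illustrative, not from the original source):
 * dest.count mirrors src.count, which is clamped to the end of the
 * origin; with a 16-sector chunk size and a 100-sector origin, the
 * exception for the final chunk starting at sector 96 copies only
 * 4 sectors.
 */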
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
        struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

        if (!e)
                return NULL;

        return container_of(e, struct dm_snap_pending_exception, e);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
                         struct dm_snap_pending_exception *pe, chunk_t chunk)
{
        struct dm_snap_pending_exception *pe2;

        pe2 = __lookup_pending_exception(s, chunk);
        if (pe2) {
                free_pending_exception(pe);
                return pe2;
        }

        pe->e.old_chunk = chunk;
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
        pe->started = 0;

        if (s->store->type->prepare_exception(s->store, &pe->e)) {
                free_pending_exception(pe);
                return NULL;
        }

        dm_insert_exception(&s->pending, &pe->e);

        return pe;
}
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
                            struct bio *bio, chunk_t chunk)
{
        bio->bi_bdev = s->cow->bdev;
        bio->bi_sector = chunk_to_sector(s->store,
                                         dm_chunk_number(e->new_chunk) +
                                         (chunk - e->old_chunk)) +
                                         (bio->bi_sector &
                                          s->store->chunk_mask);
}
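
/*
 * Worked example (not from the original source): for an exception
 * mapping old chunks 10-12 to new chunks 20-22 with a 16-sector chunk,
 * a bio at sector 179 (chunk 11, offset 3) is remapped to the COW at
 * sector ((20 + 1) << 4) + 3 = 339.
 */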
static int snapshot_map(struct dm_target *ti, struct bio *bio,
                        union map_info *map_context)
{
        struct dm_exception *e;
        struct dm_snapshot *s = ti->private;
        int r = DM_MAPIO_REMAPPED;
        chunk_t chunk;
        struct dm_snap_pending_exception *pe = NULL;

        if (bio->bi_rw & REQ_FLUSH) {
                bio->bi_bdev = s->cow->bdev;
                return DM_MAPIO_REMAPPED;
        }

        chunk = sector_to_chunk(s->store, bio->bi_sector);

        /* Full snapshots are not usable */
        /* To get here the table must be live so s->active is always set. */
        if (!s->valid)
                return -EIO;

        /* FIXME: should only take write lock if we need
         * to copy an exception */
        down_write(&s->lock);

        if (!s->valid) {
                r = -EIO;
                goto out_unlock;
        }

        /* If the block is already remapped - use that, else remap it */
        e = dm_lookup_exception(&s->complete, chunk);
        if (e) {
                remap_exception(s, e, bio, chunk);
                goto out_unlock;
        }

        /*
         * Write to snapshot - higher level takes care of RW/RO
         * flags so we should only get this if we are
         * writeable.
         */
        if (bio_rw(bio) == WRITE) {
                pe = __lookup_pending_exception(s, chunk);
                if (!pe) {
                        up_write(&s->lock);
                        pe = alloc_pending_exception(s);
                        down_write(&s->lock);

                        if (!s->valid) {
                                free_pending_exception(pe);
                                r = -EIO;
                                goto out_unlock;
                        }

                        e = dm_lookup_exception(&s->complete, chunk);
                        if (e) {
                                free_pending_exception(pe);
                                remap_exception(s, e, bio, chunk);
                                goto out_unlock;
                        }

                        pe = __find_pending_exception(s, pe, chunk);
                        if (!pe) {
                                __invalidate_snapshot(s, -ENOMEM);
                                r = -EIO;
                                goto out_unlock;
                        }
                }

                remap_exception(s, &pe->e, bio, chunk);
                bio_list_add(&pe->snapshot_bios, bio);

                r = DM_MAPIO_SUBMITTED;

                if (!pe->started) {
                        /* this is protected by snap->lock */
                        pe->started = 1;
                        up_write(&s->lock);
                        start_copy(pe);
                        goto out;
                }
        } else {
                bio->bi_bdev = s->origin->bdev;
                map_context->ptr = track_chunk(s, chunk);
        }

out_unlock:
        up_write(&s->lock);
out:
        return r;
}
/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
                              union map_info *map_context)
{
        struct dm_exception *e;
        struct dm_snapshot *s = ti->private;
        int r = DM_MAPIO_REMAPPED;
        chunk_t chunk;

        if (bio->bi_rw & REQ_FLUSH) {
                if (!map_context->target_request_nr)
                        bio->bi_bdev = s->origin->bdev;
                else
                        bio->bi_bdev = s->cow->bdev;
                map_context->ptr = NULL;
                return DM_MAPIO_REMAPPED;
        }

        chunk = sector_to_chunk(s->store, bio->bi_sector);

        down_write(&s->lock);

        /* Full merging snapshots are redirected to the origin */
        if (!s->valid)
                goto redirect_to_origin;

        /* If the block is already remapped - use that */
        e = dm_lookup_exception(&s->complete, chunk);
        if (e) {
                /* Queue writes overlapping with chunks being merged */
                if (bio_rw(bio) == WRITE &&
                    chunk >= s->first_merging_chunk &&
                    chunk < (s->first_merging_chunk +
                             s->num_merging_chunks)) {
                        bio->bi_bdev = s->origin->bdev;
                        bio_list_add(&s->bios_queued_during_merge, bio);
                        r = DM_MAPIO_SUBMITTED;
                        goto out_unlock;
                }

                remap_exception(s, e, bio, chunk);

                if (bio_rw(bio) == WRITE)
                        map_context->ptr = track_chunk(s, chunk);
                goto out_unlock;
        }

redirect_to_origin:
        bio->bi_bdev = s->origin->bdev;

        if (bio_rw(bio) == WRITE) {
                up_write(&s->lock);
                return do_origin(s->origin, bio);
        }

out_unlock:
        up_write(&s->lock);

        return r;
}
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
                           int error, union map_info *map_context)
{
        struct dm_snapshot *s = ti->private;
        struct dm_snap_tracked_chunk *c = map_context->ptr;

        if (c)
                stop_tracking_chunk(s, c);

        return 0;
}

static void snapshot_merge_presuspend(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        stop_merge(s);
}

static int snapshot_preresume(struct dm_target *ti)
{
        int r = 0;
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest) {
                down_read(&snap_src->lock);
                if (s == snap_src) {
                        DMERR("Unable to resume snapshot source until "
                              "handover completes.");
                        r = -EINVAL;
                } else if (!dm_suspended(snap_src->ti)) {
                        DMERR("Unable to perform snapshot handover until "
                              "source is suspended.");
                        r = -EINVAL;
                }
                up_read(&snap_src->lock);
        }
        up_read(&_origins_lock);

        return r;
}

static void snapshot_resume(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;
        struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

        down_read(&_origins_lock);
        (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
        if (snap_src && snap_dest) {
                down_write(&snap_src->lock);
                down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
                __handover_exceptions(snap_src, snap_dest);
                up_write(&snap_dest->lock);
                up_write(&snap_src->lock);
        }
        up_read(&_origins_lock);

        /* Now we have correct chunk size, reregister */
        reregister_snapshot(s);

        down_write(&s->lock);
        s->active = 1;
        up_write(&s->lock);
}

static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
{
        sector_t min_chunksize;

        down_read(&_origins_lock);
        min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
        up_read(&_origins_lock);

        return min_chunksize;
}

static void snapshot_merge_resume(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        /*
         * Handover exceptions from existing snapshot.
         */
        snapshot_resume(ti);

        /*
         * snapshot-merge acts as an origin, so set ti->split_io
         */
        ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);

        start_merge(s);
}
static int snapshot_status(struct dm_target *ti, status_type_t type,
                           char *result, unsigned int maxlen)
{
        unsigned sz = 0;
        struct dm_snapshot *snap = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:

                down_write(&snap->lock);

                if (!snap->valid)
                        DMEMIT("Invalid");
                else if (snap->merge_failed)
                        DMEMIT("Merge failed");
                else {
                        if (snap->store->type->usage) {
                                sector_t total_sectors, sectors_allocated,
                                         metadata_sectors;
                                snap->store->type->usage(snap->store,
                                                         &total_sectors,
                                                         &sectors_allocated,
                                                         &metadata_sectors);
                                DMEMIT("%llu/%llu %llu",
                                       (unsigned long long)sectors_allocated,
                                       (unsigned long long)total_sectors,
                                       (unsigned long long)metadata_sectors);
                        }
                        else
                                DMEMIT("Unknown");
                }

                up_write(&snap->lock);

                break;

        case STATUSTYPE_TABLE:
                /*
                 * kdevname returns a static pointer so we need
                 * to make private copies if the output is to
                 * make sense.
                 */
                DMEMIT("%s %s", snap->origin->name, snap->cow->name);
                snap->store->type->status(snap->store, type, result + sz,
                                          maxlen - sz);
                break;
        }

        return 0;
}

static int snapshot_iterate_devices(struct dm_target *ti,
                                    iterate_devices_callout_fn fn, void *data)
{
        struct dm_snapshot *snap = ti->private;
        int r;

        r = fn(ti, snap->origin, 0, ti->len, data);

        if (!r)
                r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data);

        return r;
}
/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
                          struct bio *bio)
{
        int r = DM_MAPIO_REMAPPED;
        struct dm_snapshot *snap;
        struct dm_exception *e;
        struct dm_snap_pending_exception *pe;
        struct dm_snap_pending_exception *pe_to_start_now = NULL;
        struct dm_snap_pending_exception *pe_to_start_last = NULL;
        chunk_t chunk;

        /* Do all the snapshots on this origin */
        list_for_each_entry (snap, snapshots, list) {
                /*
                 * Don't make new exceptions in a merging snapshot
                 * because it has effectively been deleted
                 */
                if (dm_target_is_snapshot_merge(snap->ti))
                        continue;

                down_write(&snap->lock);

                /* Only deal with valid and active snapshots */
                if (!snap->valid || !snap->active)
                        goto next_snapshot;

                /* Nothing to do if writing beyond end of snapshot */
                if (sector >= dm_table_get_size(snap->ti->table))
                        goto next_snapshot;

                /*
                 * Remember, different snapshots can have
                 * different chunk sizes.
                 */
                chunk = sector_to_chunk(snap->store, sector);

                /*
                 * Check exception table to see if block
                 * is already remapped in this snapshot
                 * and trigger an exception if not.
                 */
                e = dm_lookup_exception(&snap->complete, chunk);
                if (e)
                        goto next_snapshot;

                pe = __lookup_pending_exception(snap, chunk);
                if (!pe) {
                        up_write(&snap->lock);
                        pe = alloc_pending_exception(snap);
                        down_write(&snap->lock);

                        if (!snap->valid) {
                                free_pending_exception(pe);
                                goto next_snapshot;
                        }

                        e = dm_lookup_exception(&snap->complete, chunk);
                        if (e) {
                                free_pending_exception(pe);
                                goto next_snapshot;
                        }

                        pe = __find_pending_exception(snap, pe, chunk);
                        if (!pe) {
                                __invalidate_snapshot(snap, -ENOMEM);
                                goto next_snapshot;
                        }
                }

                r = DM_MAPIO_SUBMITTED;

                /*
                 * If an origin bio was supplied, queue it to wait for the
                 * completion of this exception, and start this one last,
                 * at the end of the function.
                 */
                if (bio) {
                        bio_list_add(&pe->origin_bios, bio);
                        bio = NULL;

                        if (!pe->started) {
                                pe->started = 1;
                                pe_to_start_last = pe;
                        }
                }

                if (!pe->started) {
                        pe->started = 1;
                        pe_to_start_now = pe;
                }

next_snapshot:
                up_write(&snap->lock);

                if (pe_to_start_now) {
                        start_copy(pe_to_start_now);
                        pe_to_start_now = NULL;
                }
        }

        /*
         * Submit the exception against which the bio is queued last,
         * to give the other exceptions a head start.
         */
        if (pe_to_start_last)
                start_copy(pe_to_start_last);

        return r;
}
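
/*
 * Illustrative scenario (not from the original source): a write to an
 * origin chunk shared by three snapshots creates a pending exception in
 * each.  The bio is queued on the first one created and that copy is
 * kicked off last (pe_to_start_last), after the other copies have been
 * started inside the loop via pe_to_start_now.
 */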
/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
        struct origin *o;
        int r = DM_MAPIO_REMAPPED;

        down_read(&_origins_lock);
        o = __lookup_origin(origin->bdev);
        if (o)
                r = __origin_write(&o->snapshots, bio->bi_sector, bio);
        up_read(&_origins_lock);

        return r;
}
/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
                               sector_t sector, unsigned size)
{
        int must_wait = 0;
        sector_t n;
        struct origin *o;

        /*
         * The origin's __minimum_chunk_size() got stored in split_io
         * by snapshot_merge_resume().
         */
        down_read(&_origins_lock);
        o = __lookup_origin(merging_snap->origin->bdev);
        for (n = 0; n < size; n += merging_snap->ti->split_io)
                if (__origin_write(&o->snapshots, sector + n, NULL) ==
                    DM_MAPIO_SUBMITTED)
                        must_wait = 1;
        up_read(&_origins_lock);

        return must_wait;
}
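
/*
 * Worked example (illustrative, not from the original source): if the
 * merging snapshot uses 128-sector chunks and another snapshot of the
 * same origin uses 32-sector chunks, ti->split_io is 32, so merging one
 * 128-sector extent calls __origin_write() four times and may
 * reallocate four chunks in the smaller-chunked snapshot.
 */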
/*-----------------------------------------------------------------
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 *---------------------------------------------------------------*/

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        int r;
        struct dm_dev *dev;

        if (argc != 1) {
                ti->error = "origin: incorrect number of arguments";
                return -EINVAL;
        }

        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
        if (r) {
                ti->error = "Cannot get target device";
                return r;
        }

        ti->private = dev;
        ti->num_flush_requests = 1;

        return 0;
}

static void origin_dtr(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;
        dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
                      union map_info *map_context)
{
        struct dm_dev *dev = ti->private;
        bio->bi_bdev = dev->bdev;

        if (bio->bi_rw & REQ_FLUSH)
                return DM_MAPIO_REMAPPED;

        /* Only tell snapshots if this is a write */
        return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;

        ti->split_io = get_origin_minimum_chunksize(dev->bdev);
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
                         unsigned int maxlen)
{
        struct dm_dev *dev = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                snprintf(result, maxlen, "%s", dev->name);
                break;
        }

        return 0;
}

static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
                        struct bio_vec *biovec, int max_size)
{
        struct dm_dev *dev = ti->private;
        struct request_queue *q = bdev_get_queue(dev->bdev);

        if (!q->merge_bvec_fn)
                return max_size;

        bvm->bi_bdev = dev->bdev;
        bvm->bi_sector = bvm->bi_sector;

        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int origin_iterate_devices(struct dm_target *ti,
                                  iterate_devices_callout_fn fn, void *data)
{
        struct dm_dev *dev = ti->private;

        return fn(ti, dev, 0, ti->len, data);
}
static struct target_type origin_target = {
        .name    = "snapshot-origin",
        .version = {1, 7, 1},
        .module  = THIS_MODULE,
        .ctr     = origin_ctr,
        .dtr     = origin_dtr,
        .map     = origin_map,
        .resume  = origin_resume,
        .status  = origin_status,
        .merge   = origin_merge,
        .iterate_devices = origin_iterate_devices,
};

static struct target_type snapshot_target = {
        .name    = "snapshot",
        .version = {1, 10, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
        .map     = snapshot_map,
        .end_io  = snapshot_end_io,
        .preresume  = snapshot_preresume,
        .resume  = snapshot_resume,
        .status  = snapshot_status,
        .iterate_devices = snapshot_iterate_devices,
};

static struct target_type merge_target = {
        .name    = dm_snapshot_merge_target_name,
        .version = {1, 1, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
        .map     = snapshot_merge_map,
        .end_io  = snapshot_end_io,
        .presuspend = snapshot_merge_presuspend,
        .preresume  = snapshot_preresume,
        .resume  = snapshot_merge_resume,
        .status  = snapshot_status,
        .iterate_devices = snapshot_iterate_devices,
};
static int __init dm_snapshot_init(void)
{
        int r;

        r = dm_exception_store_init();
        if (r) {
                DMERR("Failed to initialize exception stores");
                return r;
        }

        r = dm_register_target(&snapshot_target);
        if (r < 0) {
                DMERR("snapshot target register failed %d", r);
                goto bad_register_snapshot_target;
        }

        r = dm_register_target(&origin_target);
        if (r < 0) {
                DMERR("Origin target register failed %d", r);
                goto bad_register_origin_target;
        }

        r = dm_register_target(&merge_target);
        if (r < 0) {
                DMERR("Merge target register failed %d", r);
                goto bad_register_merge_target;
        }

        r = init_origin_hash();
        if (r) {
                DMERR("init_origin_hash failed.");
                goto bad_origin_hash;
        }

        exception_cache = KMEM_CACHE(dm_exception, 0);
        if (!exception_cache) {
                DMERR("Couldn't create exception cache.");
                r = -ENOMEM;
                goto bad_exception_cache;
        }

        pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
        if (!pending_cache) {
                DMERR("Couldn't create pending cache.");
                r = -ENOMEM;
                goto bad_pending_cache;
        }

        tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
        if (!tracked_chunk_cache) {
                DMERR("Couldn't create cache to track chunks in use.");
                r = -ENOMEM;
                goto bad_tracked_chunk_cache;
        }

        return 0;

bad_tracked_chunk_cache:
        kmem_cache_destroy(pending_cache);
bad_pending_cache:
        kmem_cache_destroy(exception_cache);
bad_exception_cache:
        exit_origin_hash();
bad_origin_hash:
        dm_unregister_target(&merge_target);
bad_register_merge_target:
        dm_unregister_target(&origin_target);
bad_register_origin_target:
        dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
        dm_exception_store_exit();

        return r;
}

static void __exit dm_snapshot_exit(void)
{
        dm_unregister_target(&snapshot_target);
        dm_unregister_target(&origin_target);
        dm_unregister_target(&merge_target);

        exit_origin_hash();
        kmem_cache_destroy(pending_cache);
        kmem_cache_destroy(exception_cache);
        kmem_cache_destroy(tracked_chunk_cache);

        dm_exception_store_exit();
}

module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");
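
/*
 * Illustrative merge workflow (device names are examples, not from
 * this file):
 *
 *   dmsetup suspend snap
 *   dmsetup reload snap --table \
 *       "0 2097152 snapshot-merge /dev/vg/base /dev/vg/cow P 16"
 *   dmsetup resume snap
 *   # poll "dmsetup status snap" until the allocated count stops
 *   # falling, then replace the table with the plain origin mapping
 */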