/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>

#include "dm-exception-store.h"

#define DM_MSG_PREFIX "snapshots"
static const char dm_snapshot_merge_target_name[] = "snapshot-merge";

#define dm_target_is_snapshot_merge(ti) \
	((ti)->type->name == dm_snapshot_merge_target_name)
/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
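/*
 * Note: with the common 4 KiB PAGE_SIZE this works out to 256 pages per
 * snapshot; the "?:" falls back to a single page if the shift would
 * otherwise evaluate to zero.
 */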
/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
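/*
 * Chunks with reads still in flight are kept in a small hash so that
 * writers (and the merge path) can wait for conflicting I/O to drain;
 * see __chunk_is_tracked() and __check_for_conflicting_io() below.
 * DM_TRACKED_CHUNK_HASH() simply masks the low bits of the chunk number
 * into one of the 16 buckets.
 */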
struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;
	/* List of snapshots per Origin */
	struct list_head list;

	/*
	 * You can't use a snapshot if this is 0 (e.g. if full).
	 * A snapshot-merge target never clears this.
	 */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	/* Whether or not owning mapped_device is suspended */
	int suspended;

	atomic_t pending_exceptions_count;

	mempool_t *pending_pool;

	struct dm_exception_table pending;
	struct dm_exception_table complete;
	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* Chunks with outstanding reads */
	spinlock_t tracked_chunk_lock;
	mempool_t *tracked_chunk_pool;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Queue of snapshot writes for ksnapd to flush */
	struct bio_list queued_bios;
	struct work_struct queued_bios_work;

	/* Wait for events based on state_bits */
	unsigned long state_bits;

	/* Range of chunks currently being merged. */
	chunk_t first_merging_chunk;
	int num_merging_chunks;
	/*
	 * The merge operation failed if this flag is set.
	 * Failure modes are handled as follows:
	 * - I/O error reading the header
	 *	=> don't load the target; abort.
	 * - Header does not have "valid" flag set
	 *	=> use the origin; forget about the snapshot.
	 * - I/O error when reading exceptions
	 *	=> don't load the target; abort.
	 *	(We can't use the intermediate origin state.)
	 * - I/O error while merging
	 *	=> stop merging; set merge_failed; process I/O normally.
	 */
	int merge_failed;

	/*
	 * Incoming bios that overlap with chunks being merged must wait
	 * for them to be committed.
	 */
	struct bio_list bios_queued_during_merge;
};
/*
 * RUNNING_MERGE  - Merge operation is in progress.
 * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped;
 *                  cleared afterwards.
 */
#define RUNNING_MERGE          0
#define SHUTDOWN_MERGE         1
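/*
 * The merge handshake works roughly as follows: start_merge() sets
 * RUNNING_MERGE before kicking off the first copy; stop_merge() sets
 * SHUTDOWN_MERGE and sleeps on the RUNNING_MERGE bit; when the merge
 * path notices SHUTDOWN_MERGE it calls merge_shutdown(), which clears
 * RUNNING_MERGE and wakes the waiter.
 */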
struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);
static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);
static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}
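/*
 * chunk_shift is the log2 of the chunk size in 512-byte sectors, so the
 * conversion is just a shift.  For example, an 8 KiB chunk (16 sectors)
 * gives a chunk_shift of 4, and chunk 3 then starts at sector 48.
 */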
static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}
struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};
/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;
static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}
static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}
/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};
/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;
static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done);
static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock);
static uint64_t _pending_exceptions_done_count;
static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);

	init_rwsem(&_origins_lock);

	return 0;
}
static void exit_origin_hash(void)
{
	kfree(_origins);
}
static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}
static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}
static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];

	list_add_tail(&o->hash_list, sl);
}
/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 * snap_merge - an existing snapshot-merge target linked to the same origin.
 *   There can be at most one snapshot-merge target. The parameter is optional.
 *
 * Possible return values and states of snap_src and snap_dest.
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}
/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;
	struct dm_snapshot *snap_merge = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest,
					  &snap_merge) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	/*
	 * Non-snapshot-merge handover?
	 */
	if (!dm_target_is_snapshot_merge(snap->ti))
		return 1;

	/*
	 * Do not allow more than one merging snapshot.
	 */
	if (snap_merge) {
		snap->ti->error = "A snapshot is already merging.";
		return -EINVAL;
	}

	if (!snap_src->store->type->prepare_merge ||
	    !snap_src->store->type->commit_merge) {
		snap->ti->error = "Snapshot exception store does not "
				  "support snapshot-merge.";
		return -EINVAL;
	}

	return 1;
}
static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}
/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	int r = 0;
	struct block_device *bdev = snap->origin->bdev;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}
/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}
static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}
/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
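/*
 * Ignoring the low bits means that a chunk and its neighbours within the
 * same 2^hash_shift-aligned window land in the same bucket.  That is what
 * allows dm_insert_exception() below to extend an existing entry into a
 * consecutive range instead of adding a new one, and allows
 * dm_lookup_exception() to find such a range entry given any chunk
 * inside it.
 */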
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}
static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}
static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}
/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}
static struct dm_exception *alloc_completed_exception(void)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}
static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}
static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}
static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))
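/*
 * Zero means "no chunk size yet", so min_not_zero(0, 8) is 8 while
 * min_not_zero(4, 8) is 4; __minimum_chunk_size() below relies on this
 * when it folds together the chunk sizes of all snapshots of an origin.
 */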
/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static sector_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return chunk_size;
}
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;

	mem /= sizeof(struct list_head);

	return mem;
}
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}
static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_clear_bit();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}
static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s)
{
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;

	return bio_list_get(&s->bios_queued_during_merge);
}
/*
 * Remove one chunk from the index of completed exceptions.
 */
static int __remove_single_exception_chunk(struct dm_snapshot *s,
					   chunk_t old_chunk)
{
	struct dm_exception *e;

	e = dm_lookup_exception(&s->complete, old_chunk);
	if (!e) {
		DMERR("Corruption detected: exception for block %llu is "
		      "on disk but not in memory",
		      (unsigned long long)old_chunk);
		return -EINVAL;
	}

	/*
	 * If this is the only chunk using this exception, remove exception.
	 */
	if (!dm_consecutive_chunk_count(e)) {
		dm_remove_exception(e);
		free_completed_exception(e);
		return 0;
	}

	/*
	 * The chunk may be either at the beginning or the end of a
	 * group of consecutive chunks - never in the middle.  We are
	 * removing chunks in the opposite order to that in which they
	 * were added, so this should always be true.
	 * Decrement the consecutive chunk counter and adjust the
	 * starting point if necessary.
	 */
	if (old_chunk == e->old_chunk) {
		e->old_chunk++;
		e->new_chunk++;
	} else if (old_chunk != e->old_chunk +
		   dm_consecutive_chunk_count(e)) {
		DMERR("Attempt to merge block %llu from the "
		      "middle of a chunk range [%llu - %llu]",
		      (unsigned long long)old_chunk,
		      (unsigned long long)e->old_chunk,
		      (unsigned long long)
		      e->old_chunk + dm_consecutive_chunk_count(e));
		return -EINVAL;
	}

	dm_consecutive_chunk_count_dec(e);

	return 0;
}
static void flush_bios(struct bio *bio);
static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	down_write(&s->lock);

	/*
	 * Process chunks (and associated exceptions) in reverse order
	 * so that dm_consecutive_chunk_count_dec() accounting works.
	 */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	up_write(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned chunk_size);

static void merge_callback(int read_err, unsigned long write_err,
			   void *context);
static uint64_t read_pending_exceptions_done_count(void)
{
	uint64_t pending_exceptions_done;

	spin_lock(&_pending_exceptions_done_spinlock);
	pending_exceptions_done = _pending_exceptions_done_count;
	spin_unlock(&_pending_exceptions_done_spinlock);

	return pending_exceptions_done;
}
static void increment_pending_exceptions_done_count(void)
{
	spin_lock(&_pending_exceptions_done_spinlock);
	_pending_exceptions_done_count++;
	spin_unlock(&_pending_exceptions_done_spinlock);

	wake_up_all(&_pending_exceptions_done);
}
static void snapshot_merge_next_chunks(struct dm_snapshot *s)
{
	int i, linear_chunks;
	chunk_t old_chunk, new_chunk;
	struct dm_io_region src, dest;
	sector_t io_size;
	uint64_t previous_count;

	BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits));
	if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits)))
		goto shut;

	/*
	 * valid flag never changes during merge, so no lock required.
	 */
	if (!s->valid) {
		DMERR("Snapshot is invalid: can't merge");
		goto shut;
	}

	linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk,
						      &new_chunk);
	if (linear_chunks <= 0) {
		if (linear_chunks < 0) {
			DMERR("Read error in exception store: "
			      "shutting down merge");
			down_write(&s->lock);
			s->merge_failed = 1;
			up_write(&s->lock);
		}
		goto shut;
	}

	/* Adjust old_chunk and new_chunk to reflect start of linear region */
	old_chunk = old_chunk + 1 - linear_chunks;
	new_chunk = new_chunk + 1 - linear_chunks;

	/*
	 * Use one (potentially large) I/O to copy all 'linear_chunks'
	 * from the exception store to the origin
	 */
	io_size = linear_chunks * s->store->chunk_size;

	dest.bdev = s->origin->bdev;
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector);

	src.bdev = s->cow->bdev;
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = dest.count;

	/*
	 * Reallocate any exceptions needed in other snapshots then
	 * wait for the pending exceptions to complete.
	 * Each time any pending exception (globally on the system)
	 * completes we are woken and repeat the process to find out
	 * if we can proceed.  While this may not seem a particularly
	 * efficient algorithm, it is not expected to have any
	 * significant impact on performance.
	 */
	previous_count = read_pending_exceptions_done_count();
	while (origin_write_extent(s, dest.sector, io_size)) {
		wait_event(_pending_exceptions_done,
			   (read_pending_exceptions_done_count() !=
			    previous_count));
		/* Retry after the wait, until all exceptions are done. */
		previous_count = read_pending_exceptions_done_count();
	}

	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	up_write(&s->lock);

	/* Wait until writes to all 'linear_chunks' drain */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
	return;

shut:
	merge_shutdown(s);
}
static void error_bios(struct bio *bio);
static void merge_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snapshot *s = context;
	struct bio *b = NULL;

	if (read_err || write_err) {
		if (read_err)
			DMERR("Read error: shutting down merge.");
		else
			DMERR("Write error: shutting down merge.");
		goto shut;
	}

	if (s->store->type->commit_merge(s->store,
					 s->num_merging_chunks) < 0) {
		DMERR("Write error in exception store: shutting down merge");
		goto shut;
	}

	if (remove_single_exception_chunk(s) < 0)
		goto shut;

	snapshot_merge_next_chunks(s);

	return;

shut:
	down_write(&s->lock);
	s->merge_failed = 1;
	b = __release_queued_bios_after_merge(s);
	up_write(&s->lock);
	error_bios(b);

	merge_shutdown(s);
}
static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}
static int wait_schedule(void *ptr)
{
	schedule();

	return 0;
}
/*
 * Stop the merging process and wait until it finishes.
 */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
		    TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
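/*
 * For example, a table line of the form
 *
 *   0 2097152 snapshot /dev/vg/base /dev/vg/base-cow P 16
 *
 * (illustrative device names) creates a persistent ('P') snapshot of
 * /dev/vg/base using /dev/vg/base-cow as the exception store, with a
 * 16-sector (8 KiB) chunk size.
 */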
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	unsigned args_used, num_flush_requests = 1;
	fmode_t origin_mode = FMODE_READ;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad;
	}

	if (dm_target_is_snapshot_merge(ti)) {
		num_flush_requests = 2;
		origin_mode = FMODE_WRITE;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate snapshot context private "
			    "structure";
		r = -ENOMEM;
		goto bad;
	}

	cow_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, cow_path, FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	s->ti = ti;
	s->valid = 1;
	s->active = 0;
	s->suspended = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);
	s->state_bits = 0;
	s->merge_failed = 0;
	s->first_merging_chunk = 0;
	s->num_merging_chunks = 0;
	bio_list_init(&s->bios_queued_during_merge);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad_pending_pool;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	ti->private = s;
	ti->num_flush_requests = num_flush_requests;

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}
	ti->split_io = s->store->chunk_size;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);

bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
	dm_put_device(ti, s->origin);

bad_origin:
	dm_exception_store_destroy(s->store);

bad_store:
	dm_put_device(ti, s->cow);

bad_cow:
	kfree(s);

bad:
	return r;
}
static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}
static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->split_io = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}
static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	flush_workqueue(ksnapd);

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	if (dm_target_is_snapshot_merge(ti))
		stop_merge(s);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);

	dm_exception_store_destroy(s->store);

	dm_put_device(ti, s->cow);

	kfree(s);
}
/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}
static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}
static int do_origin(struct dm_dev *origin, struct bio *bio);
/*
 * Flush a list of buffers.
 */
static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio)
{
	struct bio *n;
	int r;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		r = do_origin(s->origin, bio);
		if (r == DM_MAPIO_REMAPPED)
			generic_make_request(bio);
		bio = n;
	}
}
/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}
static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_completed_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_completed_exception(e);
		error = 1;
		goto out;
	}

	/* Check for conflicting reads */
	__check_for_conflicting_io(s, pe->e.old_chunk);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	dm_insert_exception(&s->complete, e);

out:
	dm_remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = bio_list_get(&pe->origin_bios);
	free_pending_exception(pe);

	increment_pending_exceptions_done_count();

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	retry_origin_bios(s, origin_bios);
}
static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}
/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);
	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}
/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}
/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->started = 0;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s->store,
					 dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
			 (bio->bi_sector &
			  s->store->chunk_mask);
}
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	if (unlikely(bio_empty_barrier(bio))) {
		bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}
/*
 * A snapshot-merge target behaves like a combination of a snapshot
 * target and a snapshot-origin target.  It only generates new
 * exceptions in other snapshots and not in the one that is being
 * merged.
 *
 * For each chunk, if there is an existing exception, it is used to
 * redirect I/O to the cow device.  Otherwise I/O is sent to the origin,
 * which in turn might generate exceptions in other snapshots.
 * If merging is currently taking place on the chunk in question, the
 * I/O is deferred by adding it to s->bios_queued_during_merge.
 */
static int snapshot_merge_map(struct dm_target *ti, struct bio *bio,
			      union map_info *map_context)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;

	if (unlikely(bio_empty_barrier(bio))) {
		if (!map_context->flush_request)
			bio->bi_bdev = s->origin->bdev;
		else
			bio->bi_bdev = s->cow->bdev;
		map_context->ptr = NULL;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	down_write(&s->lock);

	/* Full merging snapshots are redirected to the origin */
	if (!s->valid)
		goto redirect_to_origin;

	/* If the block is already remapped - use that */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		/* Queue writes overlapping with chunks being merged */
		if (bio_rw(bio) == WRITE &&
		    chunk >= s->first_merging_chunk &&
		    chunk < (s->first_merging_chunk +
			     s->num_merging_chunks)) {
			bio->bi_bdev = s->origin->bdev;
			bio_list_add(&s->bios_queued_during_merge, bio);
			r = DM_MAPIO_SUBMITTED;
			goto out_unlock;
		}

		remap_exception(s, e, bio, chunk);

		if (bio_rw(bio) == WRITE)
			map_context->ptr = track_chunk(s, chunk);
		goto out_unlock;
	}

redirect_to_origin:
	bio->bi_bdev = s->origin->bdev;

	if (bio_rw(bio) == WRITE) {
		up_write(&s->lock);
		return do_origin(s->origin, bio);
	}

out_unlock:
	up_write(&s->lock);

	return r;
}
*ti
, struct bio
*bio
,
1741 int error
, union map_info
*map_context
)
1743 struct dm_snapshot
*s
= ti
->private;
1744 struct dm_snap_tracked_chunk
*c
= map_context
->ptr
;
1747 stop_tracking_chunk(s
, c
);
static void snapshot_merge_presuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	stop_merge(s);
}
static void snapshot_postsuspend(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->suspended = 1;
	up_write(&s->lock);
}
*ti
)
1771 struct dm_snapshot
*s
= ti
->private;
1772 struct dm_snapshot
*snap_src
= NULL
, *snap_dest
= NULL
;
1774 down_read(&_origins_lock
);
1775 (void) __find_snapshots_sharing_cow(s
, &snap_src
, &snap_dest
, NULL
);
1776 if (snap_src
&& snap_dest
) {
1777 down_read(&snap_src
->lock
);
1778 if (s
== snap_src
) {
1779 DMERR("Unable to resume snapshot source until "
1780 "handover completes.");
1782 } else if (!snap_src
->suspended
) {
1783 DMERR("Unable to perform snapshot handover until "
1784 "source is suspended.");
1787 up_read(&snap_src
->lock
);
1789 up_read(&_origins_lock
);
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}
	up_read(&_origins_lock);

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	s->suspended = 0;
	up_write(&s->lock);
}
static sector_t get_origin_minimum_chunksize(struct block_device *bdev)
{
	sector_t min_chunksize;

	down_read(&_origins_lock);
	min_chunksize = __minimum_chunk_size(__lookup_origin(bdev));
	up_read(&_origins_lock);

	return min_chunksize;
}
static void snapshot_merge_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	/*
	 * Handover exceptions from existing snapshot.
	 */
	snapshot_resume(ti);

	/*
	 * snapshot-merge acts as an origin, so set ti->split_io
	 */
	ti->split_io = get_origin_minimum_chunksize(s->origin->bdev);

	start_merge(s);
}
static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else if (snap->merge_failed)
			DMEMIT("Merge failed");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			}
			else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}

	return 0;
}
*ti
,
1899 iterate_devices_callout_fn fn
, void *data
)
1901 struct dm_snapshot
*snap
= ti
->private;
1903 return fn(ti
, snap
->origin
, 0, ti
->len
, data
);
/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe;
	struct dm_snap_pending_exception *pe_to_start_now = NULL;
	struct dm_snap_pending_exception *pe_to_start_last = NULL;
	chunk_t chunk;

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {
		/*
		 * Don't make new exceptions in a merging snapshot
		 * because it has effectively been deleted
		 */
		if (dm_target_is_snapshot_merge(snap->ti))
			continue;

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		r = DM_MAPIO_SUBMITTED;

		/*
		 * If an origin bio was supplied, queue it to wait for the
		 * completion of this exception, and start this one last,
		 * at the end of the function.
		 */
		if (bio) {
			bio_list_add(&pe->origin_bios, bio);
			bio = NULL;

			if (!pe->started) {
				pe->started = 1;
				pe_to_start_last = pe;
			}
		}

		if (!pe->started) {
			pe->started = 1;
			pe_to_start_now = pe;
		}

next_snapshot:
		up_write(&snap->lock);

		if (pe_to_start_now) {
			start_copy(pe_to_start_now);
			pe_to_start_now = NULL;
		}
	}

	/*
	 * Submit the exception against which the bio is queued last,
	 * to give the other exceptions a head start.
	 */
	if (pe_to_start_last)
		start_copy(pe_to_start_last);

	return r;
}
/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}
/*
 * Trigger exceptions in all non-merging snapshots.
 *
 * The chunk size of the merging snapshot may be larger than the chunk
 * size of some other snapshot so we may need to reallocate multiple
 * chunks in other snapshots.
 *
 * We scan all the overlapping exceptions in the other snapshots.
 * Returns 1 if anything was reallocated and must be waited for,
 * otherwise returns 0.
 *
 * size must be a multiple of merging_snap's chunk_size.
 */
static int origin_write_extent(struct dm_snapshot *merging_snap,
			       sector_t sector, unsigned size)
{
	int must_wait = 0;
	sector_t n;
	struct origin *o;

	/*
	 * The origin's __minimum_chunk_size() got stored in split_io
	 * by snapshot_merge_resume().
	 */
	down_read(&_origins_lock);
	o = __lookup_origin(merging_snap->origin->bdev);
	for (n = 0; n < size; n += merging_snap->ti->split_io)
		if (__origin_write(&o->snapshots, sector + n, NULL) ==
		    DM_MAPIO_SUBMITTED)
			must_wait = 1;
	up_read(&_origins_lock);

	return must_wait;
}
/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	ti->num_flush_requests = 1;

	return 0;
}
static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;

	dm_put_device(ti, dev);
}
static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	if (unlikely(bio_empty_barrier(bio)))
		return DM_MAPIO_REMAPPED;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
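/*
 * Keeping split_io at the smallest chunk size guarantees that a single
 * bio never straddles a chunk boundary of any snapshot, so each origin
 * write needs at most one exception per snapshot.
 */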
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;

	ti->split_io = get_origin_minimum_chunksize(dev->bdev);
}
static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}
static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_dev *dev = ti->private;

	return fn(ti, dev, 0, ti->len, data);
}
static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 7, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
	.iterate_devices = origin_iterate_devices,
};
static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 9, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.postsuspend = snapshot_postsuspend,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};
static struct target_type merge_target = {
	.name    = dm_snapshot_merge_target_name,
	.version = {1, 0, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_merge_map,
	.end_io  = snapshot_end_io,
	.presuspend = snapshot_merge_presuspend,
	.postsuspend = snapshot_postsuspend,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_merge_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r < 0) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad_register_origin_target;
	}

	r = dm_register_target(&merge_target);
	if (r < 0) {
		DMERR("Merge target register failed %d", r);
		goto bad_register_merge_target;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad_origin_hash;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad_exception_cache;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad_pending_cache;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad_tracked_chunk_cache;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad_tracked_chunk_cache:
	kmem_cache_destroy(pending_cache);
bad_pending_cache:
	kmem_cache_destroy(exception_cache);
bad_exception_cache:
	exit_origin_hash();
bad_origin_hash:
	dm_unregister_target(&merge_target);
bad_register_merge_target:
	dm_unregister_target(&origin_target);
bad_register_origin_target:
	dm_unregister_target(&snapshot_target);
bad_register_snapshot_target:
	dm_exception_store_exit();

	return r;
}
static void __exit dm_snapshot_exit(void)
{
	destroy_workqueue(ksnapd);

	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);
	dm_unregister_target(&merge_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");