/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>

#include "dm-snap.h"
#include "dm-bio-list.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

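/*
 * Arithmetic note: with 4KB pages (PAGE_SHIFT == 12), (1UL << 20) >> 12
 * is 256 pages.  The GNU "?:" extension falls back to 1 page should the
 * shift ever produce 0 (i.e. if PAGE_SIZE exceeded 1MB).
 */
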
/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock
	 * unless they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;

static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}

static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

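/*
 * Lifecycle of the tracking above: snapshot_map() calls track_chunk()
 * for reads it remaps to the origin, snapshot_end_io() calls
 * stop_tracking_chunk() when each such read completes, and
 * pending_complete() polls __chunk_is_tracked() so that a finished copy
 * is never made visible while one of those reads is still in flight.
 */
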
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}

static void exit_origin_hash(void)
{
	kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}

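/*
 * Example: bd_dev packs major:minor, so masking with ORIGIN_MASK (0xff)
 * buckets an origin by the low eight bits of its device number, giving
 * each of the 256 hash heads above roughly one minor apiece.
 */
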
static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}

static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o;
	struct block_device *bdev = snap->origin->bdev;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
				unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}

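/*
 * Example of the hash_shift grouping: the completed table is created
 * with hash_shift == DM_CHUNK_CONSECUTIVE_BITS, so all chunks in an
 * aligned group share a bucket and a run of consecutive chunks can be
 * stored (and found) as a single dm_snap_exception in that bucket.
 * The pending table uses hash_shift == 0: one entry per chunk.
 */
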
static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}

static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		/* Retry without blocking; GFP_ATOMIC may dip into reserves */
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}

static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}

static void insert_completed_exception(struct dm_snapshot *s,
				       struct dm_snap_exception *new_e)
{
	struct exception_table *eh = &s->complete;
	struct list_head *l;
	struct dm_snap_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

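/*
 * Worked example of the merge above: if e covers old chunks 10-12
 * (old_chunk 10, consecutive count 2) mapping to new chunks 7-9, a new
 * exception 13 -> 10 extends e's count to 3 rather than adding a fourth
 * hash entry, and a new exception 9 -> 6 slides e back to start at 9.
 */
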
int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	insert_completed_exception(s, e);

	return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}

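/*
 * Arithmetic note: a struct list_head is two pointers, so on a typical
 * 64-bit build this caps the table at 2MB / 16 bytes = 131072 buckets.
 */
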
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	hash_size = rounddown_pow_of_two(hash_size);
	if (init_exception_table(&s->complete, hash_size,
				 DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 * but it must still be a power of 2.
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size, 0)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}

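/*
 * Example: round_up(9, 8) turns size into the mask 7, then
 * (9 + 7) & ~7 == 16; a value already on the boundary is unchanged,
 * e.g. round_up(16, 8) == 16.
 */
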
static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  char **error)
{
	unsigned long chunk_size;
	char *value;

	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size) {
		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
		return 0;
	}

	/*
	 * Chunk size must be multiple of page size.  Silently
	 * round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (!is_power_of_2(chunk_size)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->chunk_shift = ffs(chunk_size) - 1;

	return 0;
}

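/*
 * The chunk size is counted in 512-byte sectors, so PAGE_SIZE >> 9
 * above is 8 on a 4KB-page system and a chunk size of 16 means 8KB
 * chunks.  A plausible table line using this target (an illustration,
 * not taken from this file; device names are hypothetical) would be:
 *
 *   dmsetup create snap --table \
 *       "0 2097152 snapshot /dev/vg/base /dev/vg/cow P 16"
 */
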
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
		    "structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &ti->error);
	if (r)
		goto bad3;

	s->type = persistent;

	s->valid = 1;
	s->active = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);
	s->ti = ti;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	s->store.snap = s;

	if (persistent == 'P')
		r = dm_create_persistent(&s->store);
	else
		r = dm_create_transient(&s->store);

	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad6;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	/* Metadata must only be loaded into one table at once */
	r = s->store.read_metadata(&s->store);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_load_and_register;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad_load_and_register;
	}

	ti->private = s;
	ti->split_io = s->chunk_size;

	return 0;

 bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

 bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

 bad6:
	dm_kcopyd_client_destroy(s->kcopyd_client);

 bad5:
	s->store.destroy(&s->store);

 bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

 bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

 bad2:
	kfree(s);

 bad1:
	return r;
}

static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	s->store.destroy(&s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);

	kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}

static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store.drop_snapshot)
		s->store.drop_snapshot(&s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}

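/*
 * Example of the reference counting above: a write to an origin with
 * three snapshots creates three sibling pes.  The first becomes the
 * primary_pe, holding one reference from its creation plus one taken
 * by each sibling (itself included).  __origin_write() drops one when
 * it finishes queueing and each sibling's pending_complete() drops one
 * more; whoever brings the count to zero releases the origin bios.
 */
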
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Check for conflicting reads. This is extremely improbable,
	 * so msleep(1) is sufficient and there is no need for a wait queue.
	 */
	while (__chunk_is_tracked(s, pe->e.old_chunk))
		msleep(1);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_completed_exception(s, e);

 out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);
	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
	/* The final chunk may be truncated by the end of the origin device */
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}

/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Is there a pending exception for this already ?
	 */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	/*
	 * Create a new pending exception, we don't want
	 * to hold the lock while we do this.
	 */
	up_write(&s->lock);
	pe = alloc_pending_exception(s);
	down_write(&s->lock);

	if (!s->valid) {
		free_pending_exception(pe);
		return NULL;
	}

	e = lookup_exception(&s->pending, chunk);
	if (e) {
		free_pending_exception(pe);
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store.prepare_exception(&s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

 out:
	return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	/*
	 * (chunk - e->old_chunk) selects the right chunk within a
	 * consecutive run; the masked remainder keeps the bio's offset
	 * within its chunk.
	 */
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
			 (chunk - e->old_chunk)) +
			 (bio->bi_sector & s->chunk_mask);
}

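/*
 * snapshot_map() below handles three cases: a chunk that already has a
 * completed exception is simply remapped to the COW device; a write to
 * an unremapped chunk triggers (or joins) a pending exception and the
 * bio is queued until the copy completes; a read of an unremapped
 * chunk is sent to the origin and tracked so the exception cannot
 * complete underneath it.
 */
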
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __find_pending_exception(s, bio);
		if (!pe) {
			__invalidate_snapshot(s, -ENOMEM);
			r = -EIO;
			goto out_unlock;
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}

static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}

static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					 (unsigned long long)numerator,
					 (unsigned long long)denominator);
			} else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %c %llu",
			 snap->origin->name, snap->cow->name,
			 snap->type,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __find_pending_exception(snap, bio);
		if (!pe) {
			__invalidate_snapshot(snap, -ENOMEM);
			goto next_snapshot;
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */
	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}

static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

#define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))

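/*
 * Example: min_not_zero(0, 8) == 8 and min_not_zero(16, 8) == 8, so a
 * snapshot with no chunk size set does not force split_io to zero.
 * Note the macro does not parenthesise its arguments or its result, so
 * it is only safe with simple expressions like those below.
 */
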
/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}

static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}

static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
};

static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 6, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
};

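/*
 * Initialisation below registers the two targets first and then builds
 * the shared state (origin hash, slab caches, workqueue); the error
 * labels unwind in exactly the reverse order, so each bad* label undoes
 * everything set up before the step that failed.
 */
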
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad5:
	kmem_cache_destroy(pending_cache);
bad4:
	kmem_cache_destroy(exception_cache);
bad3:
	exit_origin_hash();
bad2:
	dm_unregister_target(&origin_target);
bad1:
	dm_unregister_target(&snapshot_target);

	return r;
}

static void __exit dm_snapshot_exit(void)
{
	int r;

	destroy_workqueue(ksnapd);

	r = dm_unregister_target(&snapshot_target);
	if (r)
		DMERR("snapshot unregister failed %d", r);

	r = dm_unregister_target(&origin_target);
	if (r)
		DMERR("origin unregister failed %d", r);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");