/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */
#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>

#include "dm-snap.h"
#include "dm-bio-list.h"
#include "kcopyd.h"
#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Each snapshot reserves this many pages for io
 */
#define SNAPSHOT_PAGES 256
static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);
struct dm_snap_pending_exception {
	struct dm_snap_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock
	 * unless they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};
/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
static mempool_t *pending_pool;
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};
/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;
static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);
	init_rwsem(&_origins_lock);

	return 0;
}
static void exit_origin_hash(void)
{
	kfree(_origins);
}
static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}
static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}
static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}
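/*
 * Illustrative sketch, not part of the driver: __lookup_origin() and
 * __insert_origin() implement plain open hashing keyed on the low byte
 * of the origin's dev_t, so a lookup walks one short bucket list.  The
 * userspace model below (the _model name is hypothetical) shows the
 * bucketing; devices differing only above the low byte share a bucket.
 */
#if 0
#include <stdio.h>

#define ORIGIN_MASK 0xFF

static unsigned origin_hash_model(unsigned long dev)
{
	return dev & ORIGIN_MASK;	/* bucket index, 0..255 */
}

int main(void)
{
	/* dev 0x800011 (major 8, minor 0x11) lands in bucket 0x11 */
	printf("bucket: %u\n", origin_hash_model(0x800011));
	/* a different major, same low byte: same bucket, longer chain */
	printf("bucket: %u\n", origin_hash_model(0x900011));
	return 0;
}
#endif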
/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o;
	struct block_device *bdev = snap->origin->bdev;

	down_write(&_origins_lock);
	o = __lookup_origin(bdev);

	if (!o) {
		/* New origin */
		o = kmalloc(sizeof(*o), GFP_KERNEL);
		if (!o) {
			up_write(&_origins_lock);
			return -ENOMEM;
		}

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	list_add_tail(&snap->list, &o->snapshots);

	up_write(&_origins_lock);
	return 0;
}
static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}
/*
 * Implementation of the exception hash tables.
 */
static int init_exception_table(struct exception_table *et, uint32_t size)
{
	unsigned int i;

	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}
static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_snap_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}
static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
	return chunk & et->hash_mask;
}
static void insert_exception(struct exception_table *eh,
			     struct dm_snap_exception *e)
{
	struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
	list_add(&e->hash_list, l);
}
static void remove_exception(struct dm_snap_exception *e)
{
	list_del(&e->hash_list);
}
/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
						  chunk_t chunk)
{
	struct list_head *slot;
	struct dm_snap_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (e->old_chunk == chunk)
			return e;

	return NULL;
}
static struct dm_snap_exception *alloc_exception(void)
{
	struct dm_snap_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}
static void free_exception(struct dm_snap_exception *e)
{
	kmem_cache_free(exception_cache, e);
}
static struct dm_snap_pending_exception *alloc_pending_exception(void)
{
	return mempool_alloc(pending_pool, GFP_NOIO);
}
static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	mempool_free(pe, pending_pool);
}
int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
	struct dm_snap_exception *e;

	e = alloc_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;
	e->new_chunk = new;
	insert_exception(&s->complete, e);

	return 0;
}
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;

	mem /= sizeof(struct list_head);

	return mem;
}
/*
 * Rounds a number down to a power of 2.
 */
static uint32_t round_down(uint32_t n)
{
	while (!is_power_of_2(n))
		n &= (n - 1);

	return n;
}
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	/* Round it down to a power of 2 */
	hash_size = round_down(hash_size);
	if (init_exception_table(&s->complete, hash_size))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (init_exception_table(&s->pending, hash_size)) {
		exit_exception_table(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Round a number up to the nearest 'size' boundary.  size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
	size--;
	return (n + size) & ~size;
}
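/*
 * Illustrative sketch, not part of the driver: worked examples for the
 * two rounding helpers above.  round_down() clears the lowest set bit
 * until a single bit (the highest) remains; round_up() relies on size
 * being a power of 2 so (n + size - 1) & ~(size - 1) is the next
 * boundary.  The _model copies below are hypothetical userspace stand-ins.
 */
#if 0
#include <assert.h>

static unsigned long round_down_model(unsigned long n)
{
	while (n & (n - 1))		/* i.e. !is_power_of_2(n) */
		n &= n - 1;		/* clear the lowest set bit */
	return n;
}

static unsigned long round_up_model(unsigned long n, unsigned long size)
{
	size--;
	return (n + size) & ~size;
}

int main(void)
{
	assert(round_down_model(1000) == 512);	/* keep the top bit only */
	assert(round_down_model(64) == 64);	/* already a power of 2 */
	assert(round_up_model(10, 8) == 16);	/* next 8-sector boundary */
	assert(round_up_model(16, 8) == 16);	/* boundaries stay put */
	return 0;
}
#endif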
static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
			  char **error)
{
	unsigned long chunk_size;
	char *value;

	chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
	if (*chunk_size_arg == '\0' || *value != '\0') {
		*error = "Invalid chunk size";
		return -EINVAL;
	}

	if (!chunk_size) {
		s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
		return 0;
	}

	/*
	 * Chunk size must be multiple of page size.  Silently
	 * round up if it's not.
	 */
	chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

	/* Check chunk_size is a power of 2 */
	if (!is_power_of_2(chunk_size)) {
		*error = "Chunk size is not a power of 2";
		return -EINVAL;
	}

	/* Validate the chunk size against the device block size */
	if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
		*error = "Chunk size is not a multiple of device blocksize";
		return -EINVAL;
	}

	s->chunk_size = chunk_size;
	s->chunk_mask = chunk_size - 1;
	s->chunk_shift = ffs(chunk_size) - 1;

	return 0;
}
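/*
 * Illustrative sketch, not part of the driver: for a valid chunk size
 * the three derived fields fit together as follows (sizes in 512-byte
 * sectors).  With chunk_size 16 (8KB), chunk_mask is 15 and chunk_shift
 * is 4, so sector 35 sits in chunk 35 >> 4 = 2 at offset 35 & 15 = 3.
 */
#if 0
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned long chunk_size = 16;			/* sectors */
	unsigned long chunk_mask = chunk_size - 1;	/* 15 */
	int chunk_shift = ffs(chunk_size) - 1;		/* 4 */
	unsigned long sector = 35;

	printf("chunk %lu offset %lu\n",
	       sector >> chunk_shift, sector & chunk_mask);
	return 0;
}
#endif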
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int r = -EINVAL;
	char persistent;
	char *origin_path;
	char *cow_path;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad1;
	}

	origin_path = argv[0];
	cow_path = argv[1];
	persistent = toupper(*argv[2]);

	if (persistent != 'P' && persistent != 'N') {
		ti->error = "Persistent flag is not P or N";
		r = -EINVAL;
		goto bad1;
	}

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (s == NULL) {
		ti->error = "Cannot allocate snapshot context private "
		    "structure";
		r = -ENOMEM;
		goto bad1;
	}

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad2;
	}

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		dm_put_device(ti, s->origin);
		ti->error = "Cannot get COW device";
		goto bad2;
	}

	r = set_chunk_size(s, argv[3], &ti->error);
	if (r)
		goto bad3;

	s->type = persistent;

	s->valid = 1;
	s->active = 0;
	s->last_percent = 0;
	init_rwsem(&s->lock);
	spin_lock_init(&s->pe_lock);
	s->table = ti->table;

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad3;
	}

	s->store.snap = s;

	if (persistent == 'P')
		r = dm_create_persistent(&s->store);
	else
		r = dm_create_transient(&s->store);

	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad4;
	}

	r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad5;
	}

	/* Metadata must only be loaded into one table at once */
	r = s->store.read_metadata(&s->store);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad6;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	if (register_snapshot(s)) {
		r = -EINVAL;
		ti->error = "Cannot register snapshot origin";
		goto bad6;
	}

	ti->private = s;
	ti->split_io = s->chunk_size;

	return 0;

 bad6:
	kcopyd_client_destroy(s->kcopyd_client);

 bad5:
	s->store.destroy(&s->store);

 bad4:
	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

 bad3:
	dm_put_device(ti, s->cow);
	dm_put_device(ti, s->origin);

 bad2:
	kfree(s);

 bad1:
	return r;
}
static void __free_exceptions(struct dm_snapshot *s)
{
	kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	exit_exception_table(&s->pending, pending_cache);
	exit_exception_table(&s->complete, exception_cache);

	s->store.destroy(&s->store);
}
static void snapshot_dtr(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	flush_workqueue(ksnapd);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	__free_exceptions(s);

	dm_put_device(ti, s->origin);
	dm_put_device(ti, s->cow);

	kfree(s);
}
/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}
static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}
/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}
static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store.drop_snapshot)
		s->store.drop_snapshot(&s->store);

	s->valid = 0;

	dm_table_event(s->table);
}
static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}
static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count))
		origin_bios = bio_list_get(&primary_pe->origin_bios);

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	/*
	 * Free the primary pe if nothing references it.
	 */
	if (primary_pe && !atomic_read(&primary_pe->ref_count))
		free_pending_exception(primary_pe);

	return origin_bios;
}
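/*
 * Illustrative sketch, not part of the driver: the primary_pe protocol
 * above guarantees the primary is always freed last.  A minimal
 * userspace model with a plain counter (struct and names hypothetical):
 */
#if 0
#include <stdio.h>

struct pe_model {
	struct pe_model *primary;
	int ref_count;			/* held on the primary only */
};

/* mirrors put_pending_exception() for one sibling */
static void put_model(struct pe_model *pe)
{
	struct pe_model *primary = pe->primary;

	if (primary && --primary->ref_count == 0)
		printf("last sibling: release origin bios\n");
	if (!primary || primary != pe)
		printf("free sibling %p\n", (void *) pe);
	if (primary && primary->ref_count == 0)
		printf("free primary %p\n", (void *) primary);
}

int main(void)
{
	struct pe_model primary = { &primary, 2 };	/* its own primary */
	struct pe_model sibling = { &primary, 0 };

	put_model(&sibling);	/* drops one ref, frees only the sibling */
	put_model(&primary);	/* last ref: bios released, primary freed */
	return 0;
}
#endif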
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_exception(e);
		error = 1;
		goto out;
	}

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	insert_exception(&s->complete, e);

 out:
	remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}
static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}
/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned int write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);

	else
		/* Update the metadata if we are persistent */
		s->store.commit_exception(&s->store, &pe->e, commit_callback,
					  pe);
}
/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s, pe->e.old_chunk);
	src.count = min(s->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	kcopyd_copy(s->kcopyd_client,
		    &src, 1, &dest, 0, copy_callback, pe);
}
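/*
 * Illustrative sketch, not part of the driver: the min() above clamps
 * the copy at the end of the origin device, so the final partial chunk
 * is copied without reading past the last sector.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long chunk_size = 16;	/* sectors per chunk */
	unsigned long dev_size = 100;	/* origin length in sectors */
	unsigned long src_sector = 96;	/* start of the last chunk */
	unsigned long remaining = dev_size - src_sector;
	unsigned long count = chunk_size < remaining ? chunk_size : remaining;

	printf("copy %lu sectors\n", count);	/* prints 4, not 16 */
	return 0;
}
#endif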
/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe;
	chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

	/*
	 * Is there a pending exception for this already ?
	 */
	e = lookup_exception(&s->pending, chunk);
	if (e) {
		/* cast the exception to a pending exception */
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	/*
	 * Create a new pending exception, we don't want
	 * to hold the lock while we do this.
	 */
	up_write(&s->lock);
	pe = alloc_pending_exception();
	down_write(&s->lock);

	if (!s->valid) {
		free_pending_exception(pe);
		return NULL;
	}

	e = lookup_exception(&s->pending, chunk);
	if (e) {
		free_pending_exception(pe);
		pe = container_of(e, struct dm_snap_pending_exception, e);
		goto out;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->snap = s;
	pe->started = 0;

	if (s->store.prepare_exception(&s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	insert_exception(&s->pending, &pe->e);

 out:
	return pe;
}
static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
			    struct bio *bio)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s, e->new_chunk) +
		(bio->bi_sector & s->chunk_mask);
}
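/*
 * Illustrative sketch, not part of the driver: remapping preserves the
 * offset within the chunk and only swaps the chunk base.  With
 * 16-sector chunks, origin sector 35 (chunk 2, offset 3) remapped to
 * COW chunk 7 becomes sector 7 * 16 + 3 = 115 on the COW device.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long chunk_shift = 4, chunk_mask = 15;	/* 16-sector chunks */
	unsigned long sector = 35, new_chunk = 7;
	unsigned long cow_sector = (new_chunk << chunk_shift) +
				   (sector & chunk_mask);

	printf("cow sector %lu\n", cow_sector);	/* prints 115 */
	return 0;
}
#endif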
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_snap_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	chunk = sector_to_chunk(s, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __find_pending_exception(s, bio);
		if (!pe) {
			__invalidate_snapshot(s, -ENOMEM);
			r = -EIO;
			goto out_unlock;
		}

		remap_exception(s, &pe->e, bio);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else
		/*
		 * FIXME: this read path scares me because we
		 * always use the origin when we have a pending
		 * exception.  However I can't think of a
		 * situation where this is wrong - ejt.
		 */
		bio->bi_bdev = s->origin->bdev;

 out_unlock:
	up_write(&s->lock);
 out:
	return r;
}
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;

	down_write(&s->lock);
	s->active = 1;
	up_write(&s->lock);
}
static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		if (!snap->valid)
			snprintf(result, maxlen, "Invalid");
		else {
			if (snap->store.fraction_full) {
				sector_t numerator, denominator;
				snap->store.fraction_full(&snap->store,
							  &numerator,
							  &denominator);
				snprintf(result, maxlen, "%llu/%llu",
					 (unsigned long long)numerator,
					 (unsigned long long)denominator);
			}
			else
				snprintf(result, maxlen, "Unknown");
		}
		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		snprintf(result, maxlen, "%s %s %c %llu",
			 snap->origin->name, snap->cow->name,
			 snap->type,
			 (unsigned long long)snap->chunk_size);
		break;
	}

	return 0;
}
/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_snap_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (bio->bi_sector >= dm_table_get_size(snap->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap, bio->bi_sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __find_pending_exception(snap, bio);
		if (!pe) {
			__invalidate_snapshot(snap, -ENOMEM);
			goto next_snapshot;
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

 next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */
	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}
/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio);
	up_read(&_origins_lock);

	return r;
}
/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	return 0;
}
static void origin_dtr(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	dm_put_device(ti, dev);
}
static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
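/*
 * Illustrative sketch, not part of the driver: min_not_zero() treats
 * zero as "no value yet", which is what origin_resume() below needs
 * when folding the snapshots' chunk sizes into a single split_io.
 */
#if 0
#include <assert.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

int main(void)
{
	assert(min_not_zero(0, 16) == 16);	/* first snapshot seen */
	assert(min_not_zero(16, 8) == 8);	/* smaller chunk size wins */
	assert(min_not_zero(8, 0) == 8);	/* zero never wins */
	return 0;
}
#endif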
/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;
	struct dm_snapshot *snap;
	struct origin *o;
	chunk_t chunk_size = 0;

	down_read(&_origins_lock);
	o = __lookup_origin(dev->bdev);
	if (o)
		list_for_each_entry (snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size, snap->chunk_size);
	up_read(&_origins_lock);

	ti->split_io = chunk_size;
}
static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}
static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 5, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
};
static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 5, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
};
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		return r;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_snap_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	pending_pool = mempool_create_slab_pool(128, pending_cache);
	if (!pending_pool) {
		DMERR("Couldn't create pending pool.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad6;
	}

	return 0;

 bad6:
	mempool_destroy(pending_pool);
 bad5:
	kmem_cache_destroy(pending_cache);
 bad4:
	kmem_cache_destroy(exception_cache);
 bad3:
	exit_origin_hash();
 bad2:
	dm_unregister_target(&origin_target);
 bad1:
	dm_unregister_target(&snapshot_target);
	return r;
}
static void __exit dm_snapshot_exit(void)
{
	int r;

	destroy_workqueue(ksnapd);

	r = dm_unregister_target(&snapshot_target);
	if (r)
		DMERR("snapshot unregister failed %d", r);

	r = dm_unregister_target(&origin_target);
	if (r)
		DMERR("origin unregister failed %d", r);

	exit_origin_hash();
	mempool_destroy(pending_pool);
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
}
/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");