/*
 * dm-snapshot.c
 *
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */

#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/device-mapper.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>

#include "dm-snap.h"
#include "dm-bio-list.h"
#include "kcopyd.h"

#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Each snapshot reserves this many pages for io
 */
#define SNAPSHOT_PAGES 256

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);

struct dm_snap_pending_exception {
        struct dm_snap_exception e;

        /*
         * Origin buffers waiting for this to complete are held
         * in a bio list
         */
        struct bio_list origin_bios;
        struct bio_list snapshot_bios;

        /*
         * Short-term queue of pending exceptions prior to submission.
         */
        struct list_head list;

        /*
         * The primary pending_exception is the one that holds
         * the ref_count and the list of origin_bios for a
         * group of pending_exceptions.  It is always last to get freed.
         * These fields get set up when writing to the origin.
         */
        struct dm_snap_pending_exception *primary_pe;

        /*
         * Number of pending_exceptions processing this chunk.
         * When this drops to zero we must complete the origin bios.
         * If incrementing or decrementing this, hold pe->snap->lock for
         * the sibling concerned and not pe->primary_pe->snap->lock unless
         * they are the same.
         */
        atomic_t ref_count;

        /* Pointer back to snapshot context */
        struct dm_snapshot *snap;

        /*
         * 1 indicates the exception has already been sent to
         * kcopyd.
         */
        int started;
};

/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
static mempool_t *pending_pool;

/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
        /* The origin device */
        struct block_device *bdev;

        struct list_head hash_list;

        /* List of snapshots for this origin */
        struct list_head snapshots;
};

/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;

static int init_origin_hash(void)
{
        int i;

        _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
                           GFP_KERNEL);
        if (!_origins) {
                DMERR("unable to allocate memory");
                return -ENOMEM;
        }

        for (i = 0; i < ORIGIN_HASH_SIZE; i++)
                INIT_LIST_HEAD(_origins + i);
        init_rwsem(&_origins_lock);

        return 0;
}

static void exit_origin_hash(void)
{
        kfree(_origins);
}

static unsigned origin_hash(struct block_device *bdev)
{
        return bdev->bd_dev & ORIGIN_MASK;
}

static struct origin *__lookup_origin(struct block_device *origin)
{
        struct list_head *ol;
        struct origin *o;

        ol = &_origins[origin_hash(origin)];
        list_for_each_entry (o, ol, hash_list)
                if (bdev_equal(o->bdev, origin))
                        return o;

        return NULL;
}

static void __insert_origin(struct origin *o)
{
        struct list_head *sl = &_origins[origin_hash(o->bdev)];
        list_add_tail(&o->hash_list, sl);
}

/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
        struct origin *o;
        struct block_device *bdev = snap->origin->bdev;

        down_write(&_origins_lock);
        o = __lookup_origin(bdev);

        if (!o) {
                /* New origin */
                o = kmalloc(sizeof(*o), GFP_KERNEL);
                if (!o) {
                        up_write(&_origins_lock);
                        return -ENOMEM;
                }

                /* Initialise the struct */
                INIT_LIST_HEAD(&o->snapshots);
                o->bdev = bdev;

                __insert_origin(o);
        }

        list_add_tail(&snap->list, &o->snapshots);

        up_write(&_origins_lock);
        return 0;
}

static void unregister_snapshot(struct dm_snapshot *s)
{
        struct origin *o;

        down_write(&_origins_lock);
        o = __lookup_origin(s->origin->bdev);

        list_del(&s->list);
        if (list_empty(&o->snapshots)) {
                list_del(&o->hash_list);
                kfree(o);
        }

        up_write(&_origins_lock);
}

/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int init_exception_table(struct exception_table *et, uint32_t size,
                                unsigned hash_shift)
{
        unsigned int i;

        et->hash_shift = hash_shift;
        et->hash_mask = size - 1;
        et->table = dm_vcalloc(size, sizeof(struct list_head));
        if (!et->table)
                return -ENOMEM;

        for (i = 0; i < size; i++)
                INIT_LIST_HEAD(et->table + i);

        return 0;
}

static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
{
        struct list_head *slot;
        struct dm_snap_exception *ex, *next;
        int i, size;

        size = et->hash_mask + 1;
        for (i = 0; i < size; i++) {
                slot = et->table + i;

                list_for_each_entry_safe (ex, next, slot, hash_list)
                        kmem_cache_free(mem, ex);
        }

        vfree(et->table);
}

static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
{
        return (chunk >> et->hash_shift) & et->hash_mask;
}

static void insert_exception(struct exception_table *eh,
                             struct dm_snap_exception *e)
{
        struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
        list_add(&e->hash_list, l);
}

static void remove_exception(struct dm_snap_exception *e)
{
        list_del(&e->hash_list);
}

/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_snap_exception *lookup_exception(struct exception_table *et,
                                                  chunk_t chunk)
{
        struct list_head *slot;
        struct dm_snap_exception *e;

        slot = &et->table[exception_hash(et, chunk)];
        list_for_each_entry (e, slot, hash_list)
                if (chunk >= e->old_chunk &&
                    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
                        return e;

        return NULL;
}

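/*
 * Note on the helpers used above (defined in dm-snap.h): an exception's
 * new_chunk field packs a run length into its top
 * DM_CHUNK_CONSECUTIVE_BITS, so one entry can describe a run of
 * adjacent chunks.  For example, an exception with old_chunk == 10 and
 * a consecutive count of 2 covers origin chunks 10..12, remapped to
 * three adjacent chunks on the COW device - which is why the lookup
 * above matches any chunk in [old_chunk, old_chunk + count].
 */
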
static struct dm_snap_exception *alloc_exception(void)
{
        struct dm_snap_exception *e;

        e = kmem_cache_alloc(exception_cache, GFP_NOIO);
        if (!e)
                e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

        return e;
}

static void free_exception(struct dm_snap_exception *e)
{
        kmem_cache_free(exception_cache, e);
}

static struct dm_snap_pending_exception *alloc_pending_exception(void)
{
        return mempool_alloc(pending_pool, GFP_NOIO);
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
        mempool_free(pe, pending_pool);
}

static void insert_completed_exception(struct dm_snapshot *s,
                                       struct dm_snap_exception *new_e)
{
        struct exception_table *eh = &s->complete;
        struct list_head *l;
        struct dm_snap_exception *e = NULL;

        l = &eh->table[exception_hash(eh, new_e->old_chunk)];

        /* Add immediately if this table doesn't support consecutive chunks */
        if (!eh->hash_shift)
                goto out;

        /* List is ordered by old_chunk */
        list_for_each_entry_reverse(e, l, hash_list) {
                /* Insert after an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk +
                                         dm_consecutive_chunk_count(e) + 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
                                         dm_consecutive_chunk_count(e) + 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        free_exception(new_e);
                        return;
                }

                /* Insert before an existing chunk? */
                if (new_e->old_chunk == (e->old_chunk - 1) &&
                    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
                        dm_consecutive_chunk_count_inc(e);
                        e->old_chunk--;
                        e->new_chunk--;
                        free_exception(new_e);
                        return;
                }

                if (new_e->old_chunk > e->old_chunk)
                        break;
        }

out:
        list_add(&new_e->hash_list, e ? &e->hash_list : l);
}

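/*
 * Merge example: given an existing entry {old_chunk 10, new_chunk 20,
 * count 0}, completing a copy of old chunk 11 to new chunk 21 does not
 * add a second entry; the "insert after" test above sees that both
 * sides are adjacent and simply bumps the count, leaving
 * {old_chunk 10, new_chunk 20, count 1}.
 */
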
int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
{
        struct dm_snap_exception *e;

        e = alloc_exception();
        if (!e)
                return -ENOMEM;

        e->old_chunk = old;

        /* Consecutive_count is implicitly initialised to zero */
        e->new_chunk = new;

        insert_completed_exception(s, e);

        return 0;
}

/*
 * Hard coded magic.
 */
static int calc_max_buckets(void)
{
        /* use a fixed size of 2MB */
        unsigned long mem = 2 * 1024 * 1024;
        mem /= sizeof(struct list_head);

        return mem;
}

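/*
 * On a 64-bit build sizeof(struct list_head) is 16 bytes, so this caps
 * the completed-exception table at 2MB / 16 = 131072 buckets.
 */
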
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
        sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

        /*
         * Calculate based on the size of the original volume or
         * the COW volume...
         */
        cow_dev_size = get_dev_size(s->cow->bdev);
        origin_dev_size = get_dev_size(s->origin->bdev);
        max_buckets = calc_max_buckets();

        hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
        hash_size = min(hash_size, max_buckets);

        hash_size = rounddown_pow_of_two(hash_size);
        if (init_exception_table(&s->complete, hash_size,
                                 DM_CHUNK_CONSECUTIVE_BITS))
                return -ENOMEM;

        /*
         * Allocate hash table for in-flight exceptions
         * Make this smaller than the real hash table
         */
        hash_size >>= 3;
        if (hash_size < 64)
                hash_size = 64;

        if (init_exception_table(&s->pending, hash_size, 0)) {
                exit_exception_table(&s->complete, exception_cache);
                return -ENOMEM;
        }

        return 0;
}

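/*
 * Sizing example: a 1GiB origin (2097152 sectors) with 8KiB chunks
 * (chunk_shift == 4) gives 131072 completed-exception buckets, which
 * happens to equal the calc_max_buckets() cap on 64-bit; the pending
 * table is then 131072 >> 3 = 16384 buckets.
 */
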
/*
 * Round a number up to the nearest 'size' boundary. size must
 * be a power of 2.
 */
static ulong round_up(ulong n, ulong size)
{
        size--;
        return (n + size) & ~size;
}

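/* e.g. round_up(10, 8) == 16 and round_up(16, 8) == 16. */
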
static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
                          char **error)
{
        unsigned long chunk_size;
        char *value;

        chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
        if (*chunk_size_arg == '\0' || *value != '\0') {
                *error = "Invalid chunk size";
                return -EINVAL;
        }

        if (!chunk_size) {
                s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
                return 0;
        }

        /*
         * Chunk size must be multiple of page size. Silently
         * round up if it's not.
         */
        chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);

        /* Check chunk_size is a power of 2 */
        if (!is_power_of_2(chunk_size)) {
                *error = "Chunk size is not a power of 2";
                return -EINVAL;
        }

        /* Validate the chunk size against the device block size */
        if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
                *error = "Chunk size is not a multiple of device blocksize";
                return -EINVAL;
        }

        s->chunk_size = chunk_size;
        s->chunk_mask = chunk_size - 1;
        s->chunk_shift = ffs(chunk_size) - 1;

        return 0;
}

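/*
 * Worked example: a chunk-size argument of "16" means 16 sectors of
 * 512 bytes, i.e. 8KiB chunks.  That yields chunk_mask == 15 (the
 * offset within a chunk) and chunk_shift == ffs(16) - 1 == 4, which
 * sector_to_chunk() uses to turn a sector number into a chunk number.
 */
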
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        struct dm_snapshot *s;
        int r = -EINVAL;
        char persistent;
        char *origin_path;
        char *cow_path;

        if (argc != 4) {
                ti->error = "requires exactly 4 arguments";
                r = -EINVAL;
                goto bad1;
        }

        origin_path = argv[0];
        cow_path = argv[1];
        persistent = toupper(*argv[2]);

        if (persistent != 'P' && persistent != 'N') {
                ti->error = "Persistent flag is not P or N";
                r = -EINVAL;
                goto bad1;
        }

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (s == NULL) {
                ti->error = "Cannot allocate snapshot context private "
                            "structure";
                r = -ENOMEM;
                goto bad1;
        }

        r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
        if (r) {
                ti->error = "Cannot get origin device";
                goto bad2;
        }

        r = dm_get_device(ti, cow_path, 0, 0,
                          FMODE_READ | FMODE_WRITE, &s->cow);
        if (r) {
                dm_put_device(ti, s->origin);
                ti->error = "Cannot get COW device";
                goto bad2;
        }

        r = set_chunk_size(s, argv[3], &ti->error);
        if (r)
                goto bad3;

        s->type = persistent;

        s->valid = 1;
        s->active = 0;
        s->last_percent = 0;
        init_rwsem(&s->lock);
        spin_lock_init(&s->pe_lock);
        s->table = ti->table;

        /* Allocate hash table for COW data */
        if (init_hash_tables(s)) {
                ti->error = "Unable to allocate hash table space";
                r = -ENOMEM;
                goto bad3;
        }

        s->store.snap = s;

        if (persistent == 'P')
                r = dm_create_persistent(&s->store);
        else
                r = dm_create_transient(&s->store);

        if (r) {
                ti->error = "Couldn't create exception store";
                r = -EINVAL;
                goto bad4;
        }

        r = kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
        if (r) {
                ti->error = "Could not create kcopyd client";
                goto bad5;
        }

        /* Metadata must only be loaded into one table at once */
        r = s->store.read_metadata(&s->store);
        if (r < 0) {
                ti->error = "Failed to read snapshot metadata";
                goto bad6;
        } else if (r > 0) {
                s->valid = 0;
                DMWARN("Snapshot is marked invalid.");
        }

        bio_list_init(&s->queued_bios);
        INIT_WORK(&s->queued_bios_work, flush_queued_bios);

        /* Add snapshot to the list of snapshots for this origin */
        /* Exceptions aren't triggered till snapshot_resume() is called */
        if (register_snapshot(s)) {
                r = -EINVAL;
                ti->error = "Cannot register snapshot origin";
                goto bad6;
        }

        ti->private = s;
        ti->split_io = s->chunk_size;

        return 0;

 bad6:
        kcopyd_client_destroy(s->kcopyd_client);

 bad5:
        s->store.destroy(&s->store);

 bad4:
        exit_exception_table(&s->pending, pending_cache);
        exit_exception_table(&s->complete, exception_cache);

 bad3:
        dm_put_device(ti, s->cow);
        dm_put_device(ti, s->origin);

 bad2:
        kfree(s);

 bad1:
        return r;
}

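/*
 * Example table line for this constructor (device names are
 * illustrative): a persistent snapshot of /dev/sda1 using /dev/sdb1 as
 * the COW device with 16-sector (8KiB) chunks:
 *
 *     echo "0 2097152 snapshot /dev/sda1 /dev/sdb1 P 16" | \
 *             dmsetup create snap
 */
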
static void __free_exceptions(struct dm_snapshot *s)
{
        kcopyd_client_destroy(s->kcopyd_client);
        s->kcopyd_client = NULL;

        exit_exception_table(&s->pending, pending_cache);
        exit_exception_table(&s->complete, exception_cache);

        s->store.destroy(&s->store);
}

static void snapshot_dtr(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        flush_workqueue(ksnapd);

        /* Prevent further origin writes from using this snapshot. */
        /* After this returns there can be no new kcopyd jobs. */
        unregister_snapshot(s);

        __free_exceptions(s);

        dm_put_device(ti, s->origin);
        dm_put_device(ti, s->cow);

        kfree(s);
}

/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                generic_make_request(bio);
                bio = n;
        }
}

static void flush_queued_bios(struct work_struct *work)
{
        struct dm_snapshot *s =
                container_of(work, struct dm_snapshot, queued_bios_work);
        struct bio *queued_bios;
        unsigned long flags;

        spin_lock_irqsave(&s->pe_lock, flags);
        queued_bios = bio_list_get(&s->queued_bios);
        spin_unlock_irqrestore(&s->pe_lock, flags);

        flush_bios(queued_bios);
}

/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
        struct bio *n;

        while (bio) {
                n = bio->bi_next;
                bio->bi_next = NULL;
                bio_io_error(bio);
                bio = n;
        }
}

static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
        if (!s->valid)
                return;

        if (err == -EIO)
                DMERR("Invalidating snapshot: Error reading/writing.");
        else if (err == -ENOMEM)
                DMERR("Invalidating snapshot: Unable to allocate exception.");

        if (s->store.drop_snapshot)
                s->store.drop_snapshot(&s->store);

        s->valid = 0;

        dm_table_event(s->table);
}

static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
        atomic_inc(&pe->ref_count);
}

static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
        struct dm_snap_pending_exception *primary_pe;
        struct bio *origin_bios = NULL;

        primary_pe = pe->primary_pe;

        /*
         * If this pe is involved in a write to the origin and
         * it is the last sibling to complete then release
         * the bios for the original write to the origin.
         */
        if (primary_pe &&
            atomic_dec_and_test(&primary_pe->ref_count))
                origin_bios = bio_list_get(&primary_pe->origin_bios);

        /*
         * Free the pe if it's not linked to an origin write or if
         * it's not itself a primary pe.
         */
        if (!primary_pe || primary_pe != pe)
                free_pending_exception(pe);

        /*
         * Free the primary pe if nothing references it.
         */
        if (primary_pe && !atomic_read(&primary_pe->ref_count))
                free_pending_exception(primary_pe);

        return origin_bios;
}

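/*
 * Reference-count walkthrough: a write to an origin shared by two
 * snapshots creates two sibling pes that point at one primary_pe.
 * __origin_write() takes a reference per sibling plus one of its own,
 * so the origin bios are only released, and the primary_pe freed, once
 * the last of those references is dropped here.
 */
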
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
        struct dm_snap_exception *e;
        struct dm_snapshot *s = pe->snap;
        struct bio *origin_bios = NULL;
        struct bio *snapshot_bios = NULL;
        int error = 0;

        if (!success) {
                /* Read/write error - snapshot is unusable */
                down_write(&s->lock);
                __invalidate_snapshot(s, -EIO);
                error = 1;
                goto out;
        }

        e = alloc_exception();
        if (!e) {
                down_write(&s->lock);
                __invalidate_snapshot(s, -ENOMEM);
                error = 1;
                goto out;
        }
        *e = pe->e;

        down_write(&s->lock);
        if (!s->valid) {
                free_exception(e);
                error = 1;
                goto out;
        }

        /*
         * Add a proper exception, and remove the
         * in-flight exception from the list.
         */
        insert_completed_exception(s, e);

 out:
        remove_exception(&pe->e);
        snapshot_bios = bio_list_get(&pe->snapshot_bios);
        origin_bios = put_pending_exception(pe);

        up_write(&s->lock);

        /* Submit any pending write bios */
        if (error)
                error_bios(snapshot_bios);
        else
                flush_bios(snapshot_bios);

        flush_bios(origin_bios);
}

static void commit_callback(void *context, int success)
{
        struct dm_snap_pending_exception *pe = context;

        pending_complete(pe, success);
}

/*
 * Called when the copy I/O has finished. kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
        struct dm_snap_pending_exception *pe = context;
        struct dm_snapshot *s = pe->snap;

        if (read_err || write_err)
                pending_complete(pe, 0);

        else
                /* Update the metadata if we are persistent */
                s->store.commit_exception(&s->store, &pe->e, commit_callback,
                                          pe);
}

/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
        struct dm_snapshot *s = pe->snap;
        struct io_region src, dest;
        struct block_device *bdev = s->origin->bdev;
        sector_t dev_size;

        dev_size = get_dev_size(bdev);

        src.bdev = bdev;
        src.sector = chunk_to_sector(s, pe->e.old_chunk);
        src.count = min(s->chunk_size, dev_size - src.sector);

        dest.bdev = s->cow->bdev;
        dest.sector = chunk_to_sector(s, pe->e.new_chunk);
        dest.count = src.count;

        /* Hand over to kcopyd */
        kcopyd_copy(s->kcopyd_client,
                    &src, 1, &dest, 0, copy_callback, pe);
}

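/*
 * Copy-on-write sequence: start_copy() asks kcopyd to copy one chunk
 * from the origin to the COW device; kcopyd then calls copy_callback(),
 * which asks the exception store to commit the mapping; the store's
 * commit_callback() finally runs pending_complete(), making the remap
 * visible and releasing any bios queued against the pe.
 */
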
/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s, struct bio *bio)
{
        struct dm_snap_exception *e;
        struct dm_snap_pending_exception *pe;
        chunk_t chunk = sector_to_chunk(s, bio->bi_sector);

        /*
         * Is there a pending exception for this already ?
         */
        e = lookup_exception(&s->pending, chunk);
        if (e) {
                /* cast the exception to a pending exception */
                pe = container_of(e, struct dm_snap_pending_exception, e);
                goto out;
        }

        /*
         * Create a new pending exception, we don't want
         * to hold the lock while we do this.
         */
        up_write(&s->lock);
        pe = alloc_pending_exception();
        down_write(&s->lock);

        if (!s->valid) {
                free_pending_exception(pe);
                return NULL;
        }

        /*
         * Recheck: another thread may have created and inserted the
         * same pending exception while the lock was dropped.
         */
        e = lookup_exception(&s->pending, chunk);
        if (e) {
                free_pending_exception(pe);
                pe = container_of(e, struct dm_snap_pending_exception, e);
                goto out;
        }

        pe->e.old_chunk = chunk;
        bio_list_init(&pe->origin_bios);
        bio_list_init(&pe->snapshot_bios);
        pe->primary_pe = NULL;
        atomic_set(&pe->ref_count, 0);
        pe->snap = s;
        pe->started = 0;

        if (s->store.prepare_exception(&s->store, &pe->e)) {
                free_pending_exception(pe);
                return NULL;
        }

        get_pending_exception(pe);
        insert_exception(&s->pending, &pe->e);

 out:
        return pe;
}

static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
                            struct bio *bio, chunk_t chunk)
{
        bio->bi_bdev = s->cow->bdev;
        bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
                         (chunk - e->old_chunk)) +
                         (bio->bi_sector & s->chunk_mask);
}

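/*
 * Remap example: with 16-sector chunks, a bio at origin sector 163
 * (chunk 10, offset 3) matching an exception {old_chunk 10,
 * new_chunk 20} is redirected to sector 20 * 16 + 3 == 323 on the COW
 * device.
 */
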
static int snapshot_map(struct dm_target *ti, struct bio *bio,
                        union map_info *map_context)
{
        struct dm_snap_exception *e;
        struct dm_snapshot *s = ti->private;
        int r = DM_MAPIO_REMAPPED;
        chunk_t chunk;
        struct dm_snap_pending_exception *pe = NULL;

        chunk = sector_to_chunk(s, bio->bi_sector);

        /* Full snapshots are not usable */
        /* To get here the table must be live so s->active is always set. */
        if (!s->valid)
                return -EIO;

        /* FIXME: should only take write lock if we need
         * to copy an exception */
        down_write(&s->lock);

        if (!s->valid) {
                r = -EIO;
                goto out_unlock;
        }

        /* If the block is already remapped - use that, else remap it */
        e = lookup_exception(&s->complete, chunk);
        if (e) {
                remap_exception(s, e, bio, chunk);
                goto out_unlock;
        }

        /*
         * Write to snapshot - higher level takes care of RW/RO
         * flags so we should only get this if we are
         * writeable.
         */
        if (bio_rw(bio) == WRITE) {
                pe = __find_pending_exception(s, bio);
                if (!pe) {
                        __invalidate_snapshot(s, -ENOMEM);
                        r = -EIO;
                        goto out_unlock;
                }

                remap_exception(s, &pe->e, bio, chunk);
                bio_list_add(&pe->snapshot_bios, bio);

                r = DM_MAPIO_SUBMITTED;

                if (!pe->started) {
                        /* this is protected by snap->lock */
                        pe->started = 1;
                        up_write(&s->lock);
                        start_copy(pe);
                        goto out;
                }
        } else
                /*
                 * FIXME: this read path scares me because we
                 * always use the origin when we have a pending
                 * exception. However I can't think of a
                 * situation where this is wrong - ejt.
                 */
                bio->bi_bdev = s->origin->bdev;

 out_unlock:
        up_write(&s->lock);
 out:
        return r;
}

static void snapshot_resume(struct dm_target *ti)
{
        struct dm_snapshot *s = ti->private;

        down_write(&s->lock);
        s->active = 1;
        up_write(&s->lock);
}

static int snapshot_status(struct dm_target *ti, status_type_t type,
                           char *result, unsigned int maxlen)
{
        struct dm_snapshot *snap = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                if (!snap->valid)
                        snprintf(result, maxlen, "Invalid");
                else {
                        if (snap->store.fraction_full) {
                                sector_t numerator, denominator;
                                snap->store.fraction_full(&snap->store,
                                                          &numerator,
                                                          &denominator);
                                snprintf(result, maxlen, "%llu/%llu",
                                         (unsigned long long)numerator,
                                         (unsigned long long)denominator);
                        }
                        else
                                snprintf(result, maxlen, "Unknown");
                }
                break;

        case STATUSTYPE_TABLE:
                /*
                 * kdevname returns a static pointer so we need
                 * to make private copies if the output is to
                 * make sense.
                 */
                snprintf(result, maxlen, "%s %s %c %llu",
                         snap->origin->name, snap->cow->name,
                         snap->type,
                         (unsigned long long)snap->chunk_size);
                break;
        }

        return 0;
}

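/*
 * STATUSTYPE_INFO output is "<numerator>/<denominator>", e.g.
 * "1024/204800", the fraction of the exception store in use;
 * "Invalid" means the snapshot hit an error or ran out of COW space
 * and has been disabled.
 */
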
/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/
static int __origin_write(struct list_head *snapshots, struct bio *bio)
{
        int r = DM_MAPIO_REMAPPED, first = 0;
        struct dm_snapshot *snap;
        struct dm_snap_exception *e;
        struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
        chunk_t chunk;
        LIST_HEAD(pe_queue);

        /* Do all the snapshots on this origin */
        list_for_each_entry (snap, snapshots, list) {

                down_write(&snap->lock);

                /* Only deal with valid and active snapshots */
                if (!snap->valid || !snap->active)
                        goto next_snapshot;

                /* Nothing to do if writing beyond end of snapshot */
                if (bio->bi_sector >= dm_table_get_size(snap->table))
                        goto next_snapshot;

                /*
                 * Remember, different snapshots can have
                 * different chunk sizes.
                 */
                chunk = sector_to_chunk(snap, bio->bi_sector);

                /*
                 * Check exception table to see if block
                 * is already remapped in this snapshot
                 * and trigger an exception if not.
                 *
                 * ref_count is initialised to 1 so pending_complete()
                 * won't destroy the primary_pe while we're inside this loop.
                 */
                e = lookup_exception(&snap->complete, chunk);
                if (e)
                        goto next_snapshot;

                pe = __find_pending_exception(snap, bio);
                if (!pe) {
                        __invalidate_snapshot(snap, -ENOMEM);
                        goto next_snapshot;
                }

                if (!primary_pe) {
                        /*
                         * Either every pe here has same
                         * primary_pe or none has one yet.
                         */
                        if (pe->primary_pe)
                                primary_pe = pe->primary_pe;
                        else {
                                primary_pe = pe;
                                first = 1;
                        }

                        bio_list_add(&primary_pe->origin_bios, bio);

                        r = DM_MAPIO_SUBMITTED;
                }

                if (!pe->primary_pe) {
                        pe->primary_pe = primary_pe;
                        get_pending_exception(primary_pe);
                }

                if (!pe->started) {
                        pe->started = 1;
                        list_add_tail(&pe->list, &pe_queue);
                }

 next_snapshot:
                up_write(&snap->lock);
        }

        if (!primary_pe)
                return r;

        /*
         * If this is the first time we're processing this chunk and
         * ref_count is now 1 it means all the pending exceptions
         * got completed while we were in the loop above, so it falls to
         * us here to remove the primary_pe and submit any origin_bios.
         */

        if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
                flush_bios(bio_list_get(&primary_pe->origin_bios));
                free_pending_exception(primary_pe);
                /* If we got here, pe_queue is necessarily empty. */
                return r;
        }

        /*
         * Now that we have a complete pe list we can start the copying.
         */
        list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
                start_copy(pe);

        return r;
}

/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
        struct origin *o;
        int r = DM_MAPIO_REMAPPED;

        down_read(&_origins_lock);
        o = __lookup_origin(origin->bdev);
        if (o)
                r = __origin_write(&o->snapshots, bio);
        up_read(&_origins_lock);

        return r;
}

/*
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 */

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        int r;
        struct dm_dev *dev;

        if (argc != 1) {
                ti->error = "origin: incorrect number of arguments";
                return -EINVAL;
        }

        r = dm_get_device(ti, argv[0], 0, ti->len,
                          dm_table_get_mode(ti->table), &dev);
        if (r) {
                ti->error = "Cannot get target device";
                return r;
        }

        ti->private = dev;
        return 0;
}

static void origin_dtr(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;
        dm_put_device(ti, dev);
}

static int origin_map(struct dm_target *ti, struct bio *bio,
                      union map_info *map_context)
{
        struct dm_dev *dev = ti->private;
        bio->bi_bdev = dev->bdev;

        /* Only tell snapshots if this is a write */
        return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}

#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))

/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
        struct dm_dev *dev = ti->private;
        struct dm_snapshot *snap;
        struct origin *o;
        chunk_t chunk_size = 0;

        down_read(&_origins_lock);
        o = __lookup_origin(dev->bdev);
        if (o)
                list_for_each_entry (snap, &o->snapshots, list)
                        chunk_size = min_not_zero(chunk_size, snap->chunk_size);
        up_read(&_origins_lock);

        ti->split_io = chunk_size;
}

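/*
 * Using the smallest chunk size as split_io means no bio handed to
 * origin_map() ever straddles a chunk boundary in any snapshot, so
 * __origin_write() deals with at most one chunk per snapshot.
 */
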
static int origin_status(struct dm_target *ti, status_type_t type, char *result,
                         unsigned int maxlen)
{
        struct dm_dev *dev = ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                result[0] = '\0';
                break;

        case STATUSTYPE_TABLE:
                snprintf(result, maxlen, "%s", dev->name);
                break;
        }

        return 0;
}

static struct target_type origin_target = {
        .name    = "snapshot-origin",
        .version = {1, 6, 0},
        .module  = THIS_MODULE,
        .ctr     = origin_ctr,
        .dtr     = origin_dtr,
        .map     = origin_map,
        .resume  = origin_resume,
        .status  = origin_status,
};

static struct target_type snapshot_target = {
        .name    = "snapshot",
        .version = {1, 6, 0},
        .module  = THIS_MODULE,
        .ctr     = snapshot_ctr,
        .dtr     = snapshot_dtr,
        .map     = snapshot_map,
        .resume  = snapshot_resume,
        .status  = snapshot_status,
};

static int __init dm_snapshot_init(void)
{
        int r;

        r = dm_register_target(&snapshot_target);
        if (r) {
                DMERR("snapshot target register failed %d", r);
                return r;
        }

        r = dm_register_target(&origin_target);
        if (r < 0) {
                DMERR("Origin target register failed %d", r);
                goto bad1;
        }

        r = init_origin_hash();
        if (r) {
                DMERR("init_origin_hash failed.");
                goto bad2;
        }

        exception_cache = KMEM_CACHE(dm_snap_exception, 0);
        if (!exception_cache) {
                DMERR("Couldn't create exception cache.");
                r = -ENOMEM;
                goto bad3;
        }

        pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
        if (!pending_cache) {
                DMERR("Couldn't create pending cache.");
                r = -ENOMEM;
                goto bad4;
        }

        pending_pool = mempool_create_slab_pool(128, pending_cache);
        if (!pending_pool) {
                DMERR("Couldn't create pending pool.");
                r = -ENOMEM;
                goto bad5;
        }

        ksnapd = create_singlethread_workqueue("ksnapd");
        if (!ksnapd) {
                DMERR("Failed to create ksnapd workqueue.");
                r = -ENOMEM;
                goto bad6;
        }

        return 0;

 bad6:
        mempool_destroy(pending_pool);
 bad5:
        kmem_cache_destroy(pending_cache);
 bad4:
        kmem_cache_destroy(exception_cache);
 bad3:
        exit_origin_hash();
 bad2:
        dm_unregister_target(&origin_target);
 bad1:
        dm_unregister_target(&snapshot_target);
        return r;
}

static void __exit dm_snapshot_exit(void)
{
        int r;

        destroy_workqueue(ksnapd);

        r = dm_unregister_target(&snapshot_target);
        if (r)
                DMERR("snapshot unregister failed %d", r);

        r = dm_unregister_target(&origin_target);
        if (r)
                DMERR("origin unregister failed %d", r);

        exit_origin_hash();
        mempool_destroy(pending_pool);
        kmem_cache_destroy(pending_cache);
        kmem_cache_destroy(exception_cache);
}

/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");