drivers/md/dm-snap.c
1 /*
2  * dm-snapshot.c
3  *
4  * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
5  *
6  * This file is released under the GPL.
7  */
9 #include <linux/blkdev.h>
10 #include <linux/ctype.h>
11 #include <linux/device-mapper.h>
12 #include <linux/fs.h>
13 #include <linux/init.h>
14 #include <linux/kdev_t.h>
15 #include <linux/list.h>
16 #include <linux/mempool.h>
17 #include <linux/module.h>
18 #include <linux/slab.h>
19 #include <linux/vmalloc.h>
20 #include <linux/log2.h>
21 #include <linux/dm-kcopyd.h>
23 #include "dm-snap.h"
24 #include "dm-bio-list.h"
26 #define DM_MSG_PREFIX "snapshots"
29 * The percentage increment we will wake up users at
31 #define WAKE_UP_PERCENT 5
34 * kcopyd priority of snapshot operations
36 #define SNAPSHOT_COPY_PRIORITY 2
39 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
41 #define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)
44 * The size of the mempool used to track chunks in use.
46 #define MIN_IOS 256
48 static struct workqueue_struct *ksnapd;
49 static void flush_queued_bios(struct work_struct *work);
51 struct dm_snap_pending_exception {
52 struct dm_snap_exception e;
55 * Origin buffers waiting for this to complete are held
56 * in a bio list
58 struct bio_list origin_bios;
59 struct bio_list snapshot_bios;
62 * Short-term queue of pending exceptions prior to submission.
64 struct list_head list;
67 * The primary pending_exception is the one that holds
68 * the ref_count and the list of origin_bios for a
69 * group of pending_exceptions. It is always last to get freed.
70 * These fields get set up when writing to the origin.
72 struct dm_snap_pending_exception *primary_pe;
75 * Number of pending_exceptions processing this chunk.
76 * When this drops to zero we must complete the origin bios.
77 * If incrementing or decrementing this, hold pe->snap->lock for
78 * the sibling concerned and not pe->primary_pe->snap->lock unless
79 * they are the same.
81 atomic_t ref_count;
83 /* Pointer back to snapshot context */
84 struct dm_snapshot *snap;
87 * 1 indicates the exception has already been sent to
88 * kcopyd.
90 int started;
94 * Hash table mapping origin volumes to lists of snapshots and
95 * a lock to protect it
97 static struct kmem_cache *exception_cache;
98 static struct kmem_cache *pending_cache;
100 struct dm_snap_tracked_chunk {
101 struct hlist_node node;
102 chunk_t chunk;
105 static struct kmem_cache *tracked_chunk_cache;
107 static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
108 chunk_t chunk)
110 struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
111 GFP_NOIO);
112 unsigned long flags;
114 c->chunk = chunk;
116 spin_lock_irqsave(&s->tracked_chunk_lock, flags);
117 hlist_add_head(&c->node,
118 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
119 spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
121 return c;
124 static void stop_tracking_chunk(struct dm_snapshot *s,
125 struct dm_snap_tracked_chunk *c)
127 unsigned long flags;
129 spin_lock_irqsave(&s->tracked_chunk_lock, flags);
130 hlist_del(&c->node);
131 spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);
133 mempool_free(c, s->tracked_chunk_pool);
136 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
137 {
138 	struct dm_snap_tracked_chunk *c;
139 	struct hlist_node *hn;
140 	int found = 0;
142 	spin_lock_irq(&s->tracked_chunk_lock);
144 	hlist_for_each_entry(c, hn,
145 	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
146 		if (c->chunk == chunk) {
147 			found = 1;
148 			break;
149 		}
150 	}
152 	spin_unlock_irq(&s->tracked_chunk_lock);
154 	return found;
155 }
158 * One of these per registered origin, held in the snapshot_origins hash
160 struct origin {
161 /* The origin device */
162 struct block_device *bdev;
164 struct list_head hash_list;
166 /* List of snapshots for this origin */
167 struct list_head snapshots;
171 * Size of the hash table for origin volumes. If we make this
172 * the size of the minors list then it should be nearly perfect
174 #define ORIGIN_HASH_SIZE 256
175 #define ORIGIN_MASK 0xFF
176 static struct list_head *_origins;
177 static struct rw_semaphore _origins_lock;
179 static int init_origin_hash(void)
181 int i;
183 _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
184 GFP_KERNEL);
185 if (!_origins) {
186 DMERR("unable to allocate memory");
187 return -ENOMEM;
190 for (i = 0; i < ORIGIN_HASH_SIZE; i++)
191 INIT_LIST_HEAD(_origins + i);
192 init_rwsem(&_origins_lock);
194 return 0;
197 static void exit_origin_hash(void)
199 kfree(_origins);
202 static unsigned origin_hash(struct block_device *bdev)
204 return bdev->bd_dev & ORIGIN_MASK;
207 static struct origin *__lookup_origin(struct block_device *origin)
209 struct list_head *ol;
210 struct origin *o;
212 ol = &_origins[origin_hash(origin)];
213 list_for_each_entry (o, ol, hash_list)
214 if (bdev_equal(o->bdev, origin))
215 return o;
217 return NULL;
220 static void __insert_origin(struct origin *o)
222 struct list_head *sl = &_origins[origin_hash(o->bdev)];
223 list_add_tail(&o->hash_list, sl);
227 * Make a note of the snapshot and its origin so we can look it
228 * up when the origin has a write on it.
230 static int register_snapshot(struct dm_snapshot *snap)
232 struct origin *o, *new_o;
233 struct block_device *bdev = snap->origin->bdev;
235 new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
236 if (!new_o)
237 return -ENOMEM;
239 down_write(&_origins_lock);
240 o = __lookup_origin(bdev);
242 if (o)
243 kfree(new_o);
244 else {
245 /* New origin */
246 o = new_o;
248 /* Initialise the struct */
249 INIT_LIST_HEAD(&o->snapshots);
250 o->bdev = bdev;
252 __insert_origin(o);
255 list_add_tail(&snap->list, &o->snapshots);
257 up_write(&_origins_lock);
258 return 0;
261 static void unregister_snapshot(struct dm_snapshot *s)
263 struct origin *o;
265 down_write(&_origins_lock);
266 o = __lookup_origin(s->origin->bdev);
268 list_del(&s->list);
269 if (list_empty(&o->snapshots)) {
270 list_del(&o->hash_list);
271 kfree(o);
274 up_write(&_origins_lock);
278 * Implementation of the exception hash tables.
279 * The lowest hash_shift bits of the chunk number are ignored, allowing
280 * some consecutive chunks to be grouped together.
282 static int init_exception_table(struct exception_table *et, uint32_t size,
283 unsigned hash_shift)
285 unsigned int i;
287 et->hash_shift = hash_shift;
288 et->hash_mask = size - 1;
289 et->table = dm_vcalloc(size, sizeof(struct list_head));
290 if (!et->table)
291 return -ENOMEM;
293 for (i = 0; i < size; i++)
294 INIT_LIST_HEAD(et->table + i);
296 return 0;
299 static void exit_exception_table(struct exception_table *et, struct kmem_cache *mem)
301 struct list_head *slot;
302 struct dm_snap_exception *ex, *next;
303 int i, size;
305 size = et->hash_mask + 1;
306 for (i = 0; i < size; i++) {
307 slot = et->table + i;
309 list_for_each_entry_safe (ex, next, slot, hash_list)
310 kmem_cache_free(mem, ex);
313 vfree(et->table);
316 static uint32_t exception_hash(struct exception_table *et, chunk_t chunk)
317 {
318 	return (chunk >> et->hash_shift) & et->hash_mask;
319 }
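/*
 * Example (hypothetical values, for illustration only): with
 * hash_shift == 4 and hash_mask == 0xff, chunks 0..15 all hash to
 * bucket 0 and chunks 16..31 to bucket 1, so runs of consecutive
 * chunks share a bucket and can be merged by
 * insert_completed_exception() below.
 */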
321 static void insert_exception(struct exception_table *eh,
322 struct dm_snap_exception *e)
324 struct list_head *l = &eh->table[exception_hash(eh, e->old_chunk)];
325 list_add(&e->hash_list, l);
328 static void remove_exception(struct dm_snap_exception *e)
330 list_del(&e->hash_list);
334 * Return the exception data for a sector, or NULL if not
335 * remapped.
337 static struct dm_snap_exception *lookup_exception(struct exception_table *et,
338 chunk_t chunk)
340 struct list_head *slot;
341 struct dm_snap_exception *e;
343 slot = &et->table[exception_hash(et, chunk)];
344 list_for_each_entry (e, slot, hash_list)
345 if (chunk >= e->old_chunk &&
346 chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
347 return e;
349 return NULL;
352 static struct dm_snap_exception *alloc_exception(void)
354 struct dm_snap_exception *e;
356 e = kmem_cache_alloc(exception_cache, GFP_NOIO);
357 if (!e)
358 e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);
360 return e;
363 static void free_exception(struct dm_snap_exception *e)
365 kmem_cache_free(exception_cache, e);
368 static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
370 struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
371 GFP_NOIO);
373 atomic_inc(&s->pending_exceptions_count);
374 pe->snap = s;
376 return pe;
379 static void free_pending_exception(struct dm_snap_pending_exception *pe)
381 struct dm_snapshot *s = pe->snap;
383 mempool_free(pe, s->pending_pool);
384 smp_mb__before_atomic_dec();
385 atomic_dec(&s->pending_exceptions_count);
388 static void insert_completed_exception(struct dm_snapshot *s,
389 				       struct dm_snap_exception *new_e)
390 {
391 	struct exception_table *eh = &s->complete;
392 	struct list_head *l;
393 	struct dm_snap_exception *e = NULL;
395 	l = &eh->table[exception_hash(eh, new_e->old_chunk)];
397 	/* Add immediately if this table doesn't support consecutive chunks */
398 	if (!eh->hash_shift)
399 		goto out;
401 	/* List is ordered by old_chunk */
402 	list_for_each_entry_reverse(e, l, hash_list) {
403 		/* Insert after an existing chunk? */
404 		if (new_e->old_chunk == (e->old_chunk +
405 					 dm_consecutive_chunk_count(e) + 1) &&
406 		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
407 					 dm_consecutive_chunk_count(e) + 1)) {
408 			dm_consecutive_chunk_count_inc(e);
409 			free_exception(new_e);
410 			return;
411 		}
413 		/* Insert before an existing chunk? */
414 		if (new_e->old_chunk == (e->old_chunk - 1) &&
415 		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
416 			dm_consecutive_chunk_count_inc(e);
417 			e->old_chunk--;
418 			e->new_chunk--;
419 			free_exception(new_e);
420 			return;
421 		}
423 		if (new_e->old_chunk > e->old_chunk)
424 			break;
425 	}
427 out:
428 	list_add(&new_e->hash_list, e ? &e->hash_list : l);
429 }
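/*
 * Worked example (hypothetical chunk numbers): if the list already holds
 * an exception mapping old chunk 10 to new chunk 20 with a consecutive
 * count of 2 (i.e. old 10..12 -> new 20..22), a new exception 13 -> 23 is
 * absorbed by bumping the count to 3 rather than being inserted, and a
 * new exception 9 -> 19 is absorbed by bumping the count and sliding
 * old_chunk/new_chunk back by one.
 */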
431 int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new)
433 struct dm_snap_exception *e;
435 e = alloc_exception();
436 if (!e)
437 return -ENOMEM;
439 e->old_chunk = old;
441 /* Consecutive_count is implicitly initialised to zero */
442 e->new_chunk = new;
444 insert_completed_exception(s, e);
446 return 0;
450 * Hard coded magic.
452 static int calc_max_buckets(void)
454 /* use a fixed size of 2MB */
455 unsigned long mem = 2 * 1024 * 1024;
456 mem /= sizeof(struct list_head);
458 return mem;
462 * Allocate room for a suitable hash table.
464 static int init_hash_tables(struct dm_snapshot *s)
466 sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;
469 * Calculate based on the size of the original volume or
470 * the COW volume...
472 cow_dev_size = get_dev_size(s->cow->bdev);
473 origin_dev_size = get_dev_size(s->origin->bdev);
474 max_buckets = calc_max_buckets();
476 hash_size = min(origin_dev_size, cow_dev_size) >> s->chunk_shift;
477 hash_size = min(hash_size, max_buckets);
479 hash_size = rounddown_pow_of_two(hash_size);
480 if (init_exception_table(&s->complete, hash_size,
481 DM_CHUNK_CONSECUTIVE_BITS))
482 return -ENOMEM;
485 * Allocate hash table for in-flight exceptions
486 * Make this smaller than the real hash table
488 hash_size >>= 3;
489 if (hash_size < 64)
490 hash_size = 64;
492 if (init_exception_table(&s->pending, hash_size, 0)) {
493 exit_exception_table(&s->complete, exception_cache);
494 return -ENOMEM;
497 return 0;
500 /*
501  * Round a number up to the nearest 'size' boundary. size must
502  * be a power of 2.
503  */
504 static ulong round_up(ulong n, ulong size)
505 {
506 	size--;
507 	return (n + size) & ~size;
508 }
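/*
 * Example (illustrative): round_up(5, 8) == 8 and round_up(16, 8) == 16;
 * with size a power of two, (n + size - 1) & ~(size - 1) rounds n up to
 * the next multiple of size.
 */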
510 static int set_chunk_size(struct dm_snapshot *s, const char *chunk_size_arg,
511 char **error)
513 unsigned long chunk_size;
514 char *value;
516 chunk_size = simple_strtoul(chunk_size_arg, &value, 10);
517 if (*chunk_size_arg == '\0' || *value != '\0') {
518 *error = "Invalid chunk size";
519 return -EINVAL;
522 if (!chunk_size) {
523 s->chunk_size = s->chunk_mask = s->chunk_shift = 0;
524 return 0;
528 * Chunk size must be multiple of page size. Silently
529 * round up if it's not.
531 chunk_size = round_up(chunk_size, PAGE_SIZE >> 9);
533 /* Check chunk_size is a power of 2 */
534 if (!is_power_of_2(chunk_size)) {
535 *error = "Chunk size is not a power of 2";
536 return -EINVAL;
539 /* Validate the chunk size against the device block size */
540 if (chunk_size % (bdev_hardsect_size(s->cow->bdev) >> 9)) {
541 *error = "Chunk size is not a multiple of device blocksize";
542 return -EINVAL;
545 s->chunk_size = chunk_size;
546 s->chunk_mask = chunk_size - 1;
547 s->chunk_shift = ffs(chunk_size) - 1;
549 return 0;
553 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
555 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
557 struct dm_snapshot *s;
558 int i;
559 int r = -EINVAL;
560 char persistent;
561 char *origin_path;
562 char *cow_path;
564 if (argc != 4) {
565 ti->error = "requires exactly 4 arguments";
566 r = -EINVAL;
567 goto bad1;
570 origin_path = argv[0];
571 cow_path = argv[1];
572 persistent = toupper(*argv[2]);
574 if (persistent != 'P' && persistent != 'N') {
575 ti->error = "Persistent flag is not P or N";
576 r = -EINVAL;
577 goto bad1;
580 s = kmalloc(sizeof(*s), GFP_KERNEL);
581 if (s == NULL) {
582 ti->error = "Cannot allocate snapshot context private "
583 "structure";
584 r = -ENOMEM;
585 goto bad1;
588 r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
589 if (r) {
590 ti->error = "Cannot get origin device";
591 goto bad2;
594 r = dm_get_device(ti, cow_path, 0, 0,
595 FMODE_READ | FMODE_WRITE, &s->cow);
596 if (r) {
597 dm_put_device(ti, s->origin);
598 ti->error = "Cannot get COW device";
599 goto bad2;
602 r = set_chunk_size(s, argv[3], &ti->error);
603 if (r)
604 goto bad3;
606 s->type = persistent;
608 s->valid = 1;
609 s->active = 0;
610 atomic_set(&s->pending_exceptions_count, 0);
611 init_rwsem(&s->lock);
612 spin_lock_init(&s->pe_lock);
613 s->ti = ti;
615 /* Allocate hash table for COW data */
616 if (init_hash_tables(s)) {
617 ti->error = "Unable to allocate hash table space";
618 r = -ENOMEM;
619 goto bad3;
622 s->store.snap = s;
624 if (persistent == 'P')
625 r = dm_create_persistent(&s->store);
626 else
627 r = dm_create_transient(&s->store);
629 if (r) {
630 ti->error = "Couldn't create exception store";
631 r = -EINVAL;
632 goto bad4;
635 r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
636 if (r) {
637 ti->error = "Could not create kcopyd client";
638 goto bad5;
641 s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
642 if (!s->pending_pool) {
643 ti->error = "Could not allocate mempool for pending exceptions";
644 goto bad6;
647 s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
648 tracked_chunk_cache);
649 if (!s->tracked_chunk_pool) {
650 ti->error = "Could not allocate tracked_chunk mempool for "
651 "tracking reads";
652 goto bad_tracked_chunk_pool;
655 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
656 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);
658 spin_lock_init(&s->tracked_chunk_lock);
660 /* Metadata must only be loaded into one table at once */
661 r = s->store.read_metadata(&s->store);
662 if (r < 0) {
663 ti->error = "Failed to read snapshot metadata";
664 goto bad_load_and_register;
665 } else if (r > 0) {
666 s->valid = 0;
667 DMWARN("Snapshot is marked invalid.");
670 bio_list_init(&s->queued_bios);
671 INIT_WORK(&s->queued_bios_work, flush_queued_bios);
673 /* Add snapshot to the list of snapshots for this origin */
674 /* Exceptions aren't triggered till snapshot_resume() is called */
675 if (register_snapshot(s)) {
676 r = -EINVAL;
677 ti->error = "Cannot register snapshot origin";
678 goto bad_load_and_register;
681 ti->private = s;
682 ti->split_io = s->chunk_size;
684 return 0;
686 bad_load_and_register:
687 mempool_destroy(s->tracked_chunk_pool);
689 bad_tracked_chunk_pool:
690 mempool_destroy(s->pending_pool);
692 bad6:
693 dm_kcopyd_client_destroy(s->kcopyd_client);
695 bad5:
696 s->store.destroy(&s->store);
698 bad4:
699 exit_exception_table(&s->pending, pending_cache);
700 exit_exception_table(&s->complete, exception_cache);
702 bad3:
703 dm_put_device(ti, s->cow);
704 dm_put_device(ti, s->origin);
706 bad2:
707 kfree(s);
709 bad1:
710 return r;
713 static void __free_exceptions(struct dm_snapshot *s)
715 dm_kcopyd_client_destroy(s->kcopyd_client);
716 s->kcopyd_client = NULL;
718 exit_exception_table(&s->pending, pending_cache);
719 exit_exception_table(&s->complete, exception_cache);
721 s->store.destroy(&s->store);
724 static void snapshot_dtr(struct dm_target *ti)
726 #ifdef CONFIG_DM_DEBUG
727 int i;
728 #endif
729 struct dm_snapshot *s = ti->private;
731 flush_workqueue(ksnapd);
733 /* Prevent further origin writes from using this snapshot. */
734 /* After this returns there can be no new kcopyd jobs. */
735 unregister_snapshot(s);
737 while (atomic_read(&s->pending_exceptions_count))
738 yield();
740 * Ensure instructions in mempool_destroy aren't reordered
741 * before atomic_read.
743 smp_mb();
745 #ifdef CONFIG_DM_DEBUG
746 for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
747 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
748 #endif
750 mempool_destroy(s->tracked_chunk_pool);
752 __free_exceptions(s);
754 mempool_destroy(s->pending_pool);
756 dm_put_device(ti, s->origin);
757 dm_put_device(ti, s->cow);
759 kfree(s);
763 * Flush a list of buffers.
765 static void flush_bios(struct bio *bio)
767 struct bio *n;
769 while (bio) {
770 n = bio->bi_next;
771 bio->bi_next = NULL;
772 generic_make_request(bio);
773 bio = n;
777 static void flush_queued_bios(struct work_struct *work)
779 struct dm_snapshot *s =
780 container_of(work, struct dm_snapshot, queued_bios_work);
781 struct bio *queued_bios;
782 unsigned long flags;
784 spin_lock_irqsave(&s->pe_lock, flags);
785 queued_bios = bio_list_get(&s->queued_bios);
786 spin_unlock_irqrestore(&s->pe_lock, flags);
788 flush_bios(queued_bios);
792 * Error a list of buffers.
794 static void error_bios(struct bio *bio)
796 struct bio *n;
798 while (bio) {
799 n = bio->bi_next;
800 bio->bi_next = NULL;
801 bio_io_error(bio);
802 bio = n;
806 static void __invalidate_snapshot(struct dm_snapshot *s, int err)
808 if (!s->valid)
809 return;
811 if (err == -EIO)
812 DMERR("Invalidating snapshot: Error reading/writing.");
813 else if (err == -ENOMEM)
814 DMERR("Invalidating snapshot: Unable to allocate exception.");
816 if (s->store.drop_snapshot)
817 s->store.drop_snapshot(&s->store);
819 s->valid = 0;
821 dm_table_event(s->ti->table);
824 static void get_pending_exception(struct dm_snap_pending_exception *pe)
826 atomic_inc(&pe->ref_count);
829 static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
831 struct dm_snap_pending_exception *primary_pe;
832 struct bio *origin_bios = NULL;
834 primary_pe = pe->primary_pe;
837 * If this pe is involved in a write to the origin and
838 * it is the last sibling to complete then release
839 * the bios for the original write to the origin.
841 if (primary_pe &&
842 atomic_dec_and_test(&primary_pe->ref_count)) {
843 origin_bios = bio_list_get(&primary_pe->origin_bios);
844 free_pending_exception(primary_pe);
848 * Free the pe if it's not linked to an origin write or if
849 * it's not itself a primary pe.
851 if (!primary_pe || primary_pe != pe)
852 free_pending_exception(pe);
854 return origin_bios;
857 static void pending_complete(struct dm_snap_pending_exception *pe, int success)
859 struct dm_snap_exception *e;
860 struct dm_snapshot *s = pe->snap;
861 struct bio *origin_bios = NULL;
862 struct bio *snapshot_bios = NULL;
863 int error = 0;
865 if (!success) {
866 /* Read/write error - snapshot is unusable */
867 down_write(&s->lock);
868 __invalidate_snapshot(s, -EIO);
869 error = 1;
870 goto out;
873 e = alloc_exception();
874 if (!e) {
875 down_write(&s->lock);
876 __invalidate_snapshot(s, -ENOMEM);
877 error = 1;
878 goto out;
880 *e = pe->e;
882 down_write(&s->lock);
883 if (!s->valid) {
884 free_exception(e);
885 error = 1;
886 goto out;
890 * Check for conflicting reads. This is extremely improbable,
891 * so yield() is sufficient and there is no need for a wait queue.
893 while (__chunk_is_tracked(s, pe->e.old_chunk))
894 yield();
897 * Add a proper exception, and remove the
898 * in-flight exception from the list.
900 insert_completed_exception(s, e);
902 out:
903 remove_exception(&pe->e);
904 snapshot_bios = bio_list_get(&pe->snapshot_bios);
905 origin_bios = put_pending_exception(pe);
907 up_write(&s->lock);
909 /* Submit any pending write bios */
910 if (error)
911 error_bios(snapshot_bios);
912 else
913 flush_bios(snapshot_bios);
915 flush_bios(origin_bios);
918 static void commit_callback(void *context, int success)
920 struct dm_snap_pending_exception *pe = context;
922 pending_complete(pe, success);
926 * Called when the copy I/O has finished. kcopyd actually runs
927 * this code so don't block.
929 static void copy_callback(int read_err, unsigned long write_err, void *context)
931 struct dm_snap_pending_exception *pe = context;
932 struct dm_snapshot *s = pe->snap;
934 if (read_err || write_err)
935 pending_complete(pe, 0);
937 else
938 /* Update the metadata if we are persistent */
939 s->store.commit_exception(&s->store, &pe->e, commit_callback,
940 pe);
944 * Dispatches the copy operation to kcopyd.
946 static void start_copy(struct dm_snap_pending_exception *pe)
948 struct dm_snapshot *s = pe->snap;
949 struct dm_io_region src, dest;
950 struct block_device *bdev = s->origin->bdev;
951 sector_t dev_size;
953 dev_size = get_dev_size(bdev);
955 src.bdev = bdev;
956 src.sector = chunk_to_sector(s, pe->e.old_chunk);
957 src.count = min(s->chunk_size, dev_size - src.sector);
959 dest.bdev = s->cow->bdev;
960 dest.sector = chunk_to_sector(s, pe->e.new_chunk);
961 dest.count = src.count;
963 /* Hand over to kcopyd */
964 dm_kcopyd_copy(s->kcopyd_client,
965 &src, 1, &dest, 0, copy_callback, pe);
969 * Looks to see if this snapshot already has a pending exception
970 * for this chunk, otherwise it allocates a new one and inserts
971 * it into the pending table.
973 * NOTE: a write lock must be held on snap->lock before calling
974 * this.
976 static struct dm_snap_pending_exception *
977 __find_pending_exception(struct dm_snapshot *s, struct bio *bio)
979 struct dm_snap_exception *e;
980 struct dm_snap_pending_exception *pe;
981 chunk_t chunk = sector_to_chunk(s, bio->bi_sector);
984 * Is there a pending exception for this already ?
986 e = lookup_exception(&s->pending, chunk);
987 if (e) {
988 /* cast the exception to a pending exception */
989 pe = container_of(e, struct dm_snap_pending_exception, e);
990 goto out;
994 * Create a new pending exception, we don't want
995 * to hold the lock while we do this.
997 up_write(&s->lock);
998 pe = alloc_pending_exception(s);
999 down_write(&s->lock);
1001 if (!s->valid) {
1002 free_pending_exception(pe);
1003 return NULL;
1006 e = lookup_exception(&s->pending, chunk);
1007 if (e) {
1008 free_pending_exception(pe);
1009 pe = container_of(e, struct dm_snap_pending_exception, e);
1010 goto out;
1013 pe->e.old_chunk = chunk;
1014 bio_list_init(&pe->origin_bios);
1015 bio_list_init(&pe->snapshot_bios);
1016 pe->primary_pe = NULL;
1017 atomic_set(&pe->ref_count, 0);
1018 pe->started = 0;
1020 if (s->store.prepare_exception(&s->store, &pe->e)) {
1021 free_pending_exception(pe);
1022 return NULL;
1025 get_pending_exception(pe);
1026 insert_exception(&s->pending, &pe->e);
1028 out:
1029 return pe;
1032 static void remap_exception(struct dm_snapshot *s, struct dm_snap_exception *e,
1033 struct bio *bio, chunk_t chunk)
1035 bio->bi_bdev = s->cow->bdev;
1036 bio->bi_sector = chunk_to_sector(s, dm_chunk_number(e->new_chunk) +
1037 (chunk - e->old_chunk)) +
1038 (bio->bi_sector & s->chunk_mask);
1041 static int snapshot_map(struct dm_target *ti, struct bio *bio,
1042 			union map_info *map_context)
1043 {
1044 	struct dm_snap_exception *e;
1045 	struct dm_snapshot *s = ti->private;
1046 	int r = DM_MAPIO_REMAPPED;
1047 	chunk_t chunk;
1048 	struct dm_snap_pending_exception *pe = NULL;
1050 	chunk = sector_to_chunk(s, bio->bi_sector);
1052 	/* Full snapshots are not usable */
1053 	/* To get here the table must be live so s->active is always set. */
1054 	if (!s->valid)
1055 		return -EIO;
1057 	/* FIXME: should only take write lock if we need
1058 	 * to copy an exception */
1059 	down_write(&s->lock);
1061 	if (!s->valid) {
1062 		r = -EIO;
1063 		goto out_unlock;
1064 	}
1066 	/* If the block is already remapped - use that, else remap it */
1067 	e = lookup_exception(&s->complete, chunk);
1068 	if (e) {
1069 		remap_exception(s, e, bio, chunk);
1070 		goto out_unlock;
1071 	}
1073 	/*
1074 	 * Write to snapshot - higher level takes care of RW/RO
1075 	 * flags so we should only get this if we are
1076 	 * writeable.
1077 	 */
1078 	if (bio_rw(bio) == WRITE) {
1079 		pe = __find_pending_exception(s, bio);
1080 		if (!pe) {
1081 			__invalidate_snapshot(s, -ENOMEM);
1082 			r = -EIO;
1083 			goto out_unlock;
1084 		}
1086 		remap_exception(s, &pe->e, bio, chunk);
1087 		bio_list_add(&pe->snapshot_bios, bio);
1089 		r = DM_MAPIO_SUBMITTED;
1091 		if (!pe->started) {
1092 			/* this is protected by snap->lock */
1093 			pe->started = 1;
1094 			up_write(&s->lock);
1095 			start_copy(pe);
1096 			goto out;
1097 		}
1098 	} else {
1099 		bio->bi_bdev = s->origin->bdev;
1100 		map_context->ptr = track_chunk(s, chunk);
1101 	}
1103  out_unlock:
1104 	up_write(&s->lock);
1105  out:
1106 	return r;
1107 }
1109 static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
1110 int error, union map_info *map_context)
1112 struct dm_snapshot *s = ti->private;
1113 struct dm_snap_tracked_chunk *c = map_context->ptr;
1115 if (c)
1116 stop_tracking_chunk(s, c);
1118 return 0;
1121 static void snapshot_resume(struct dm_target *ti)
1123 struct dm_snapshot *s = ti->private;
1125 down_write(&s->lock);
1126 s->active = 1;
1127 up_write(&s->lock);
1130 static int snapshot_status(struct dm_target *ti, status_type_t type,
1131 char *result, unsigned int maxlen)
1133 struct dm_snapshot *snap = ti->private;
1135 switch (type) {
1136 case STATUSTYPE_INFO:
1137 if (!snap->valid)
1138 snprintf(result, maxlen, "Invalid");
1139 else {
1140 if (snap->store.fraction_full) {
1141 sector_t numerator, denominator;
1142 snap->store.fraction_full(&snap->store,
1143 &numerator,
1144 &denominator);
1145 snprintf(result, maxlen, "%llu/%llu",
1146 (unsigned long long)numerator,
1147 (unsigned long long)denominator);
1149 else
1150 snprintf(result, maxlen, "Unknown");
1152 break;
1154 case STATUSTYPE_TABLE:
1156 * kdevname returns a static pointer so we need
1157 * to make private copies if the output is to
1158 * make sense.
1160 snprintf(result, maxlen, "%s %s %c %llu",
1161 snap->origin->name, snap->cow->name,
1162 snap->type,
1163 (unsigned long long)snap->chunk_size);
1164 break;
1167 return 0;
1170 /*-----------------------------------------------------------------
1171 * Origin methods
1172 *---------------------------------------------------------------*/
1173 static int __origin_write(struct list_head *snapshots, struct bio *bio)
1175 int r = DM_MAPIO_REMAPPED, first = 0;
1176 struct dm_snapshot *snap;
1177 struct dm_snap_exception *e;
1178 struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
1179 chunk_t chunk;
1180 LIST_HEAD(pe_queue);
1182 /* Do all the snapshots on this origin */
1183 list_for_each_entry (snap, snapshots, list) {
1185 down_write(&snap->lock);
1187 /* Only deal with valid and active snapshots */
1188 if (!snap->valid || !snap->active)
1189 goto next_snapshot;
1191 /* Nothing to do if writing beyond end of snapshot */
1192 if (bio->bi_sector >= dm_table_get_size(snap->ti->table))
1193 goto next_snapshot;
1196 * Remember, different snapshots can have
1197 * different chunk sizes.
1199 chunk = sector_to_chunk(snap, bio->bi_sector);
1202 * Check exception table to see if block
1203 * is already remapped in this snapshot
1204 * and trigger an exception if not.
1206 * ref_count is initialised to 1 so pending_complete()
1207 * won't destroy the primary_pe while we're inside this loop.
1209 e = lookup_exception(&snap->complete, chunk);
1210 if (e)
1211 goto next_snapshot;
1213 pe = __find_pending_exception(snap, bio);
1214 if (!pe) {
1215 __invalidate_snapshot(snap, -ENOMEM);
1216 goto next_snapshot;
1219 if (!primary_pe) {
1221 * Either every pe here has same
1222 * primary_pe or none has one yet.
1224 if (pe->primary_pe)
1225 primary_pe = pe->primary_pe;
1226 else {
1227 primary_pe = pe;
1228 first = 1;
1231 bio_list_add(&primary_pe->origin_bios, bio);
1233 r = DM_MAPIO_SUBMITTED;
1236 if (!pe->primary_pe) {
1237 pe->primary_pe = primary_pe;
1238 get_pending_exception(primary_pe);
1241 if (!pe->started) {
1242 pe->started = 1;
1243 list_add_tail(&pe->list, &pe_queue);
1246 next_snapshot:
1247 up_write(&snap->lock);
1250 if (!primary_pe)
1251 return r;
1254 * If this is the first time we're processing this chunk and
1255 * ref_count is now 1 it means all the pending exceptions
1256 * got completed while we were in the loop above, so it falls to
1257 * us here to remove the primary_pe and submit any origin_bios.
1260 if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
1261 flush_bios(bio_list_get(&primary_pe->origin_bios));
1262 free_pending_exception(primary_pe);
1263 /* If we got here, pe_queue is necessarily empty. */
1264 return r;
1268 * Now that we have a complete pe list we can start the copying.
1270 list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
1271 start_copy(pe);
1273 return r;
1277 * Called on a write from the origin driver.
1279 static int do_origin(struct dm_dev *origin, struct bio *bio)
1281 struct origin *o;
1282 int r = DM_MAPIO_REMAPPED;
1284 down_read(&_origins_lock);
1285 o = __lookup_origin(origin->bdev);
1286 if (o)
1287 r = __origin_write(&o->snapshots, bio);
1288 up_read(&_origins_lock);
1290 return r;
1294 * Origin: maps a linear range of a device, with hooks for snapshotting.
1298 * Construct an origin mapping: <dev_path>
1299 * The context for an origin is merely a 'struct dm_dev *'
1300 * pointing to the real device.
1302 static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1304 int r;
1305 struct dm_dev *dev;
1307 if (argc != 1) {
1308 ti->error = "origin: incorrect number of arguments";
1309 return -EINVAL;
1312 r = dm_get_device(ti, argv[0], 0, ti->len,
1313 dm_table_get_mode(ti->table), &dev);
1314 if (r) {
1315 ti->error = "Cannot get target device";
1316 return r;
1319 ti->private = dev;
1320 return 0;
1323 static void origin_dtr(struct dm_target *ti)
1325 struct dm_dev *dev = ti->private;
1326 dm_put_device(ti, dev);
1329 static int origin_map(struct dm_target *ti, struct bio *bio,
1330 union map_info *map_context)
1332 struct dm_dev *dev = ti->private;
1333 bio->bi_bdev = dev->bdev;
1335 /* Only tell snapshots if this is a write */
1336 return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
1339 #define min_not_zero(l, r) (l == 0) ? r : ((r == 0) ? l : min(l, r))
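/*
 * Example (illustrative): min_not_zero(0, 8) == 8, min_not_zero(4, 0) == 4
 * and min_not_zero(4, 8) == 4, i.e. a zero (unset) chunk size never wins,
 * which is what origin_resume() below relies on when chunk_size starts at 0.
 */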
1342 * Set the target "split_io" field to the minimum of all the snapshots'
1343 * chunk sizes.
1345 static void origin_resume(struct dm_target *ti)
1347 struct dm_dev *dev = ti->private;
1348 struct dm_snapshot *snap;
1349 struct origin *o;
1350 chunk_t chunk_size = 0;
1352 down_read(&_origins_lock);
1353 o = __lookup_origin(dev->bdev);
1354 if (o)
1355 list_for_each_entry (snap, &o->snapshots, list)
1356 chunk_size = min_not_zero(chunk_size, snap->chunk_size);
1357 up_read(&_origins_lock);
1359 ti->split_io = chunk_size;
1362 static int origin_status(struct dm_target *ti, status_type_t type, char *result,
1363 unsigned int maxlen)
1365 struct dm_dev *dev = ti->private;
1367 switch (type) {
1368 case STATUSTYPE_INFO:
1369 result[0] = '\0';
1370 break;
1372 case STATUSTYPE_TABLE:
1373 snprintf(result, maxlen, "%s", dev->name);
1374 break;
1377 return 0;
1380 static struct target_type origin_target = {
1381 .name = "snapshot-origin",
1382 .version = {1, 6, 0},
1383 .module = THIS_MODULE,
1384 .ctr = origin_ctr,
1385 .dtr = origin_dtr,
1386 .map = origin_map,
1387 .resume = origin_resume,
1388 .status = origin_status,
1391 static struct target_type snapshot_target = {
1392 .name = "snapshot",
1393 .version = {1, 6, 0},
1394 .module = THIS_MODULE,
1395 .ctr = snapshot_ctr,
1396 .dtr = snapshot_dtr,
1397 .map = snapshot_map,
1398 .end_io = snapshot_end_io,
1399 .resume = snapshot_resume,
1400 .status = snapshot_status,
1403 static int __init dm_snapshot_init(void)
1405 int r;
1407 r = dm_register_target(&snapshot_target);
1408 if (r) {
1409 DMERR("snapshot target register failed %d", r);
1410 return r;
1413 r = dm_register_target(&origin_target);
1414 if (r < 0) {
1415 DMERR("Origin target register failed %d", r);
1416 goto bad1;
1419 r = init_origin_hash();
1420 if (r) {
1421 DMERR("init_origin_hash failed.");
1422 goto bad2;
1425 exception_cache = KMEM_CACHE(dm_snap_exception, 0);
1426 if (!exception_cache) {
1427 DMERR("Couldn't create exception cache.");
1428 r = -ENOMEM;
1429 goto bad3;
1432 pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
1433 if (!pending_cache) {
1434 DMERR("Couldn't create pending cache.");
1435 r = -ENOMEM;
1436 goto bad4;
1439 tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
1440 if (!tracked_chunk_cache) {
1441 DMERR("Couldn't create cache to track chunks in use.");
1442 r = -ENOMEM;
1443 goto bad5;
1446 ksnapd = create_singlethread_workqueue("ksnapd");
1447 if (!ksnapd) {
1448 DMERR("Failed to create ksnapd workqueue.");
1449 r = -ENOMEM;
1450 goto bad_pending_pool;
1453 return 0;
1455 bad_pending_pool:
1456 kmem_cache_destroy(tracked_chunk_cache);
1457 bad5:
1458 kmem_cache_destroy(pending_cache);
1459 bad4:
1460 kmem_cache_destroy(exception_cache);
1461 bad3:
1462 exit_origin_hash();
1463 bad2:
1464 dm_unregister_target(&origin_target);
1465 bad1:
1466 dm_unregister_target(&snapshot_target);
1467 return r;
1470 static void __exit dm_snapshot_exit(void)
1472 int r;
1474 destroy_workqueue(ksnapd);
1476 r = dm_unregister_target(&snapshot_target);
1477 if (r)
1478 DMERR("snapshot unregister failed %d", r);
1480 r = dm_unregister_target(&origin_target);
1481 if (r)
1482 DMERR("origin unregister failed %d", r);
1484 exit_origin_hash();
1485 kmem_cache_destroy(pending_cache);
1486 kmem_cache_destroy(exception_cache);
1487 kmem_cache_destroy(tracked_chunk_cache);
1490 /* Module hooks */
1491 module_init(dm_snapshot_init);
1492 module_exit(dm_snapshot_exit);
1494 MODULE_DESCRIPTION(DM_NAME " snapshot target");
1495 MODULE_AUTHOR("Joe Thornber");
1496 MODULE_LICENSE("GPL");