/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */
#include "dm-snap.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>

#define DM_MSG_PREFIX "snapshots"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */
/*-----------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/
/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store.  It makes sense therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented,
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata area.
 *
 * All on disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
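/*
 * Editor's worked example, assuming the default 16KB chunk size
 * defined above: struct disk_exception is 16 bytes, so one chunk
 * holds 1024 exceptions.  Chunk 0 is then the header, chunk 1 is
 * metadata area 0, chunks 2-1025 are its exception data, chunk
 * 1026 is metadata area 1, and so on (see area_location() below).
 */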
/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
#define SNAP_MAGIC 0x70416e53
/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1
struct disk_header {
	uint32_t magic;

	/*
	 * Is this snapshot valid.  There is no way of recovering
	 * an invalid snapshot.
	 */
	uint32_t valid;

	/*
	 * Simple, incrementing version.  No backward
	 * compatibility.
	 */
	uint32_t version;

	/* In sectors */
	uint32_t chunk_size;
};

struct disk_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};
/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_snapshot *snap;	/* up pointer to my snapshot */
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'area' currently belongs to.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 */
	chunk_t next_free;

	/*
	 * The index of the next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};
static unsigned sectors_to_pages(unsigned sectors)
{
	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
}
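/*
 * PAGE_SIZE >> 9 is the number of 512-byte sectors per page, so the
 * helper above rounds a sector count up to whole pages: with 4KB
 * pages, 32 sectors -> DIV_ROUND_UP(32, 8) = 4 pages.
 */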
static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->snap->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		return r;

	ps->zero_area = vmalloc(len);
	if (!ps->zero_area) {
		vfree(ps->area);
		return r;
	}
	memset(ps->zero_area, 0, len);

	return 0;
}
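/*
 * Note: vmalloc() suits these buffers because a chunk may span
 * several pages and dm-io accesses them as DM_IO_VMA, which expects
 * a virtually contiguous region.
 */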
static void free_area(struct pstore *ps)
{
	vfree(ps->area);
	ps->area = NULL;

	vfree(ps->zero_area);
	ps->zero_area = NULL;
}
struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};
static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}
/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
{
	struct dm_io_region where = {
		.bdev = ps->snap->cow->bdev,
		.sector = ps->snap->chunk_size * chunk,
		.count = ps->snap->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = ps->area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);

	return req.result;
}
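/*
 * The mdata_req above lives on chunk_io()'s stack; this is safe
 * because flush_workqueue() ensures do_metadata() has finished with
 * it (and filled in req.result) before chunk_io() returns.
 */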
/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return 1 + ((ps->exceptions_per_area + 1) * area);
}
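/*
 * E.g. with 1024 exceptions per area: area 0 starts at chunk 1,
 * area 1 at chunk 1026, area 2 at chunk 2051.
 */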
/*
 * Read or write a metadata area.  Remembering to skip the first
 * chunk which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
	int r;
	chunk_t chunk;

	chunk = area_location(ps, ps->current_area);

	r = chunk_io(ps, chunk, rw, 0);
	if (r)
		return r;

	return 0;
}
static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);
}
static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	struct dm_io_region where = {
		.bdev = ps->snap->cow->bdev,
		.sector = ps->snap->chunk_size * area_location(ps, area),
		.count = ps->snap->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = ps->zero_area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};

	return dm_io(&io_req, 1, &where, NULL);
}
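/*
 * Wiping the next area on disk before it is used means a later
 * metadata scan always finds the new_chunk == 0 end-of-exceptions
 * marker instead of stale data (see insert_exceptions() below).
 */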
static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	chunk_t chunk_size;
	int chunk_size_supplied = 1;

	/*
	 * Use default chunk size (or hardsect_size, if larger) if none supplied
	 */
	if (!ps->snap->chunk_size) {
		ps->snap->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_hardsect_size(ps->snap->cow->bdev) >> 9);
		ps->snap->chunk_mask = ps->snap->chunk_size - 1;
		ps->snap->chunk_shift = ffs(ps->snap->chunk_size) - 1;
		chunk_size_supplied = 0;
	}

	ps->io_client = dm_io_client_create(sectors_to_pages(ps->snap->
							     chunk_size));
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, 0, READ, 1);
	if (r)
		goto bad;

	dh = (struct disk_header *) ps->area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (!chunk_size_supplied || ps->snap->chunk_size == chunk_size)
		return 0;

	DMWARN("chunk size %llu in device metadata overrides "
	       "table chunk size of %llu.",
	       (unsigned long long)chunk_size,
	       (unsigned long long)ps->snap->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	ps->snap->chunk_size = chunk_size;
	ps->snap->chunk_mask = chunk_size - 1;
	ps->snap->chunk_shift = ffs(chunk_size) - 1;

	r = dm_io_client_resize(sectors_to_pages(ps->snap->chunk_size),
				ps->io_client);
	if (r)
		return r;

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}
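/*
 * When the chunk size recorded on disk overrides the table-supplied
 * one, read_header() above frees and reallocates both the dm-io
 * client pages and the area buffers to match the new size.
 */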
static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->area, 0, ps->snap->chunk_size << SECTOR_SHIFT);

	dh = (struct disk_header *) ps->area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->snap->chunk_size);

	return chunk_io(ps, 0, WRITE, 1);
}
/*
 * Access functions for the disk exceptions, these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps->area) + index;
}
static void read_exception(struct pstore *ps,
			   uint32_t index, struct disk_exception *result)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	result->old_chunk = le64_to_cpu(e->old_chunk);
	result->new_chunk = le64_to_cpu(e->new_chunk);
}
static void write_exception(struct pstore *ps,
			    uint32_t index, struct disk_exception *de)
{
	struct disk_exception *e = get_exception(ps, index);

	/* copy it */
	e->old_chunk = cpu_to_le64(de->old_chunk);
	e->new_chunk = cpu_to_le64(de->new_chunk);
}
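/*
 * Because read_exception()/write_exception() convert at the area
 * boundary, the rest of this file works purely in CPU byte order.
 */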
/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps, int *full)
{
	int r;
	unsigned int i;
	struct disk_exception de;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, i, &de);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is we know that we've hit the end of the
		 * exceptions.  Therefore the area is not full.
		 */
		if (de.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= de.new_chunk)
			ps->next_free = de.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = dm_add_exception(ps->snap, de.old_chunk, de.new_chunk);
		if (r)
			return r;
	}

	return 0;
}
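/*
 * Note that this scan is also how next_free survives a reboot: it
 * is not stored in the header, but recomputed from the highest
 * new_chunk seen while reloading the exceptions.
 */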
static int read_exceptions(struct pstore *ps)
{
	int r, full = 1;

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		r = area_io(ps, READ);
		if (r)
			return r;

		r = insert_exceptions(ps, &full);
		if (r)
			return r;
	}

	ps->current_area--;

	return 0;
}
static struct pstore *get_info(struct exception_store *store)
{
	return (struct pstore *) store->context;
}
static void persistent_fraction_full(struct exception_store *store,
				     sector_t *numerator, sector_t *denominator)
{
	*numerator = get_info(store)->next_free * store->snap->chunk_size;
	*denominator = get_dev_size(store->snap->cow->bdev);
}
static void persistent_destroy(struct exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);
	dm_io_client_destroy(ps->io_client);
	vfree(ps->callbacks);
	free_area(ps);
	kfree(ps);
}
static int persistent_read_metadata(struct exception_store *store)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->snap->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to setup a new snapshot ?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r) {
			DMWARN("zero_disk_area(0) failed");
			return r;
		}
	} else {
		/*
		 * Sanity checks.
		 */
		if (ps->version != SNAPSHOT_DISK_VERSION) {
			DMWARN("unable to handle snapshot disk version %d",
			       ps->version);
			return -EINVAL;
		}

		/*
		 * Metadata are valid, but snapshot is invalidated
		 */
		if (!ps->valid)
			return 1;

		/*
		 * Read the metadata.
		 */
		r = read_exceptions(ps);
		if (r)
			return r;
	}

	return 0;
}
static int persistent_prepare(struct exception_store *store,
			      struct dm_snap_exception *e)
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
	chunk_t next_free;
	sector_t size = get_dev_size(store->snap->cow->bdev);

	/* Is there enough room ? */
	if (size < ((ps->next_free + 1) * store->snap->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move onto the next free pending, making sure to take
	 * into account the location of the metadata chunks.
	 */
	stride = (ps->exceptions_per_area + 1);
	next_free = ++ps->next_free;
	if (sector_div(next_free, stride) == 1)
		ps->next_free++;

	atomic_inc(&ps->pending_count);
	return 0;
}
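/*
 * Chunk indices congruent to 1 modulo (exceptions_per_area + 1)
 * are metadata areas (see area_location()), which is why the
 * allocator above steps over any chunk whose remainder is 1.
 */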
static void persistent_commit(struct exception_store *store,
			      struct dm_snap_exception *e,
			      void (*callback) (void *, int success),
			      void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct disk_exception de;
	struct commit_callback *cb;

	de.old_chunk = e->old_chunk;
	de.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &de);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, WRITE))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}
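/*
 * persistent_commit() thus batches the expensive work: exceptions
 * accumulate in the in-core area and callbacks array, and only when
 * the last pending exception drains (or the area fills) is the area
 * written out and every waiting callback completed in one pass.
 */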
static void persistent_drop(struct exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}
int dm_create_persistent(struct exception_store *store)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kmalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->snap = store->snap;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->next_free = 2;	/* skipping the header and first area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->destroy = persistent_destroy;
	store->read_metadata = persistent_read_metadata;
	store->prepare_exception = persistent_prepare;
	store->commit_exception = persistent_commit;
	store->drop_snapshot = persistent_drop;
	store->fraction_full = persistent_fraction_full;
	store->context = ps;

	return 0;
}
/*-----------------------------------------------------------------
 * Implementation of the store for non-persistent snapshots.
 *---------------------------------------------------------------*/
struct transient_c {
	sector_t next_free;
};

static void transient_destroy(struct exception_store *store)
{
	kfree(store->context);
}
static int transient_read_metadata(struct exception_store *store)
{
	return 0;
}
static int transient_prepare(struct exception_store *store,
			     struct dm_snap_exception *e)
{
	struct transient_c *tc = (struct transient_c *) store->context;
	sector_t size = get_dev_size(store->snap->cow->bdev);

	if (size < (tc->next_free + store->snap->chunk_size))
		return -1;

	e->new_chunk = sector_to_chunk(store->snap, tc->next_free);
	tc->next_free += store->snap->chunk_size;

	return 0;
}
static void transient_commit(struct exception_store *store,
			     struct dm_snap_exception *e,
			     void (*callback) (void *, int success),
			     void *callback_context)
{
	/* Just succeed */
	callback(callback_context, 1);
}
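/*
 * A transient store keeps nothing on disk, so there is never any
 * metadata I/O to wait for and the commit can succeed immediately.
 */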
static void transient_fraction_full(struct exception_store *store,
				    sector_t *numerator, sector_t *denominator)
{
	*numerator = ((struct transient_c *) store->context)->next_free;
	*denominator = get_dev_size(store->snap->cow->bdev);
}
int dm_create_transient(struct exception_store *store)
{
	struct transient_c *tc;

	store->destroy = transient_destroy;
	store->read_metadata = transient_read_metadata;
	store->prepare_exception = transient_prepare;
	store->commit_exception = transient_commit;
	store->drop_snapshot = NULL;
	store->fraction_full = transient_fraction_full;

	tc = kmalloc(sizeof(struct transient_c), GFP_KERNEL);
	if (!tc)
		return -ENOMEM;

	tc->next_free = 0;
	store->context = tc;

	return 0;
}