/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006-2008 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-exception-store.h"

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "persistent snapshot"
#define DM_CHUNK_SIZE_DEFAULT_SECTORS 32	/* 16KB */

/*-----------------------------------------------------------------
 * Persistent snapshots, by persistent we mean that the snapshot
 * will survive a reboot.
 *---------------------------------------------------------------*/

/*
 * We need to store a record of which parts of the origin have
 * been copied to the snapshot device.  The snapshot code
 * requires that we copy exception chunks to chunk aligned areas
 * of the COW store.  It makes sense, therefore, to store the
 * metadata in chunk size blocks.
 *
 * There is no backward or forward compatibility implemented,
 * snapshots with different disk versions than the kernel will
 * not be usable.  It is expected that "lvcreate" will blank out
 * the start of a fresh COW device before calling the snapshot
 * constructor.
 *
 * The first chunk of the COW device just contains the header.
 * After this there is a chunk filled with exception metadata,
 * followed by as many exception chunks as can fit in the
 * metadata areas.
 *
 * All on disk structures are in little-endian format.  The end
 * of the exceptions info is indicated by an exception with a
 * new_chunk of 0, which is invalid since it would point to the
 * header chunk.
 */
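
/*
 * Illustrative layout, not normative; the numbers assume the default
 * 32-sector (16KB) chunk size and 16-byte disk_exception records:
 *
 *	chunk 0		header
 *	chunk 1		metadata area 0 (16384 / 16 = 1024 exceptions)
 *	chunks 2..1025	exception data for area 0
 *	chunk 1026	metadata area 1
 *	...
 *
 * A zeroed record (new_chunk == 0) inside an area marks the end of the
 * committed exceptions, since no copied chunk can live at chunk 0.
 */
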
/*
 * Magic for persistent snapshots: "SnAp" - Feeble isn't it.
 */
#define SNAP_MAGIC 0x70416e53

/*
 * The on-disk version of the metadata.
 */
#define SNAPSHOT_DISK_VERSION 1

struct disk_header {
	uint32_t magic;

	/*
	 * Is this snapshot valid.  There is no way of recovering
	 * an invalid snapshot.
	 */
	uint32_t valid;

	/*
	 * Simple, incrementing version. no backward
	 * compatibility.
	 */
	uint32_t version;

	/* In sectors */
	uint32_t chunk_size;
};

struct disk_exception {
	uint64_t old_chunk;
	uint64_t new_chunk;
};

struct commit_callback {
	void (*callback)(void *, int success);
	void *context;
};

/*
 * The top level structure for a persistent exception store.
 */
struct pstore {
	struct dm_exception_store *store;
	int version;
	int valid;
	uint32_t exceptions_per_area;

	/*
	 * Now that we have an asynchronous kcopyd there is no
	 * need for large chunk sizes, so it won't hurt to have a
	 * whole chunk's worth of metadata in memory at once.
	 */
	void *area;

	/*
	 * An area of zeros used to clear the next area.
	 */
	void *zero_area;

	/*
	 * Used to keep track of which metadata area the data in
	 * 'chunk' is in.
	 */
	chunk_t current_area;

	/*
	 * The next free chunk for an exception.
	 */
	chunk_t next_free;

	/*
	 * The index of next free exception in the current
	 * metadata area.
	 */
	uint32_t current_committed;

	atomic_t pending_count;
	uint32_t callback_count;
	struct commit_callback *callbacks;
	struct dm_io_client *io_client;

	struct workqueue_struct *metadata_wq;
};

static unsigned sectors_to_pages(unsigned sectors)
{
	return DIV_ROUND_UP(sectors, PAGE_SIZE >> 9);
}
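
/*
 * For illustration (assuming 4KB pages): PAGE_SIZE >> 9 is 8 sectors
 * per page, so the 32-sector default chunk size maps to 4 pages.
 */
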
static int alloc_area(struct pstore *ps)
{
	int r = -ENOMEM;
	size_t len;

	len = ps->store->chunk_size << SECTOR_SHIFT;

	/*
	 * Allocate the chunk_size block of memory that will hold
	 * a single metadata area.
	 */
	ps->area = vmalloc(len);
	if (!ps->area)
		return r;

	ps->zero_area = vmalloc(len);
	if (!ps->zero_area) {
		vfree(ps->area);
		return r;
	}
	memset(ps->zero_area, 0, len);

	return 0;
}

static void free_area(struct pstore *ps)
{
	vfree(ps->area);
	ps->area = NULL;

	vfree(ps->zero_area);
	ps->zero_area = NULL;
}

struct mdata_req {
	struct dm_io_region *where;
	struct dm_io_request *io_req;
	struct work_struct work;
	int result;
};

static void do_metadata(struct work_struct *work)
{
	struct mdata_req *req = container_of(work, struct mdata_req, work);

	req->result = dm_io(req->io_req, 1, req->where, NULL);
}

/*
 * Read or write a chunk aligned and sized block of data from a device.
 */
static int chunk_io(struct pstore *ps, chunk_t chunk, int rw, int metadata)
{
	struct dm_io_region where = {
		.bdev = ps->store->cow->bdev,
		.sector = ps->store->chunk_size * chunk,
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = ps->area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};
	struct mdata_req req;

	if (!metadata)
		return dm_io(&io_req, 1, &where, NULL);

	req.where = &where;
	req.io_req = &io_req;

	/*
	 * Issue the synchronous I/O from a different thread
	 * to avoid generic_make_request recursion.
	 */
	INIT_WORK(&req.work, do_metadata);
	queue_work(ps->metadata_wq, &req.work);
	flush_workqueue(ps->metadata_wq);

	return req.result;
}
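
/*
 * Illustrative call: read_header() below issues chunk_io(ps, 0, READ, 1)
 * to fetch the header chunk.  The metadata flag routes the request through
 * the ksnaphd workqueue, with flush_workqueue() providing the synchronous
 * wait for req.result.
 */
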
/*
 * Convert a metadata area index to a chunk index.
 */
static chunk_t area_location(struct pstore *ps, chunk_t area)
{
	return 1 + ((ps->exceptions_per_area + 1) * area);
}
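
/*
 * Worked example (illustrative, assuming 1024 exceptions per area):
 * each area plus its data chunks spans 1025 chunks, so area 0 starts
 * at chunk 1, area 1 at chunk 1026, area 2 at chunk 2051, and so on.
 */
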
/*
 * Read or write a metadata area.  Remembering to skip the first
 * chunk which holds the header.
 */
static int area_io(struct pstore *ps, int rw)
{
	int r;
	chunk_t chunk;

	chunk = area_location(ps, ps->current_area);

	r = chunk_io(ps, chunk, rw, 0);
	if (r)
		return r;

	return 0;
}

static void zero_memory_area(struct pstore *ps)
{
	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);
}

static int zero_disk_area(struct pstore *ps, chunk_t area)
{
	struct dm_io_region where = {
		.bdev = ps->store->cow->bdev,
		.sector = ps->store->chunk_size * area_location(ps, area),
		.count = ps->store->chunk_size,
	};
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_VMA,
		.mem.ptr.vma = ps->zero_area,
		.client = ps->io_client,
		.notify.fn = NULL,
	};

	return dm_io(&io_req, 1, &where, NULL);
}

static int read_header(struct pstore *ps, int *new_snapshot)
{
	int r;
	struct disk_header *dh;
	chunk_t chunk_size;
	int chunk_size_supplied = 1;

	/*
	 * Use default chunk size (or hardsect_size, if larger) if none supplied
	 */
	if (!ps->store->chunk_size) {
		ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS,
		    bdev_hardsect_size(ps->store->cow->bdev) >> 9);
		ps->store->chunk_mask = ps->store->chunk_size - 1;
		ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1;
		chunk_size_supplied = 0;
	}
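
	/*
	 * Illustrative: a device reporting 4096-byte hard sectors
	 * contributes 4096 >> 9 = 8 sectors above, so the 32-sector
	 * (16KB) default still wins; only hard sector sizes above
	 * 16KB would raise the chunk size here.
	 */
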
	ps->io_client = dm_io_client_create(sectors_to_pages(ps->store->
							     chunk_size));
	if (IS_ERR(ps->io_client))
		return PTR_ERR(ps->io_client);

	r = alloc_area(ps);
	if (r)
		return r;

	r = chunk_io(ps, 0, READ, 1);
	if (r)
		goto bad;

	dh = (struct disk_header *) ps->area;

	if (le32_to_cpu(dh->magic) == 0) {
		*new_snapshot = 1;
		return 0;
	}

	if (le32_to_cpu(dh->magic) != SNAP_MAGIC) {
		DMWARN("Invalid or corrupt snapshot");
		r = -ENXIO;
		goto bad;
	}

	*new_snapshot = 0;
	ps->valid = le32_to_cpu(dh->valid);
	ps->version = le32_to_cpu(dh->version);
	chunk_size = le32_to_cpu(dh->chunk_size);

	if (!chunk_size_supplied || ps->store->chunk_size == chunk_size)
		return 0;

	DMWARN("chunk size %llu in device metadata overrides "
	       "table chunk size of %llu.",
	       (unsigned long long)chunk_size,
	       (unsigned long long)ps->store->chunk_size);

	/* We had a bogus chunk_size. Fix stuff up. */
	free_area(ps);

	ps->store->chunk_size = chunk_size;
	ps->store->chunk_mask = chunk_size - 1;
	ps->store->chunk_shift = ffs(chunk_size) - 1;

	r = dm_io_client_resize(sectors_to_pages(ps->store->chunk_size),
				ps->io_client);
	if (r)
		return r;

	r = alloc_area(ps);
	return r;

bad:
	free_area(ps);
	return r;
}

static int write_header(struct pstore *ps)
{
	struct disk_header *dh;

	memset(ps->area, 0, ps->store->chunk_size << SECTOR_SHIFT);

	dh = (struct disk_header *) ps->area;
	dh->magic = cpu_to_le32(SNAP_MAGIC);
	dh->valid = cpu_to_le32(ps->valid);
	dh->version = cpu_to_le32(ps->version);
	dh->chunk_size = cpu_to_le32(ps->store->chunk_size);

	return chunk_io(ps, 0, WRITE, 1);
}

/*
 * Access functions for the disk exceptions, these do the endian conversions.
 */
static struct disk_exception *get_exception(struct pstore *ps, uint32_t index)
{
	BUG_ON(index >= ps->exceptions_per_area);

	return ((struct disk_exception *) ps->area) + index;
}

static void read_exception(struct pstore *ps,
			   uint32_t index, struct disk_exception *result)
{
	struct disk_exception *e = get_exception(ps, index);

	result->old_chunk = le64_to_cpu(e->old_chunk);
	result->new_chunk = le64_to_cpu(e->new_chunk);
}

static void write_exception(struct pstore *ps,
			    uint32_t index, struct disk_exception *de)
{
	struct disk_exception *e = get_exception(ps, index);

	e->old_chunk = cpu_to_le64(de->old_chunk);
	e->new_chunk = cpu_to_le64(de->new_chunk);
}

/*
 * Registers the exceptions that are present in the current area.
 * 'full' is filled in to indicate if the area has been
 * filled.
 */
static int insert_exceptions(struct pstore *ps,
			     int (*callback)(void *callback_context,
					     chunk_t old, chunk_t new),
			     void *callback_context,
			     int *full)
{
	int r;
	unsigned int i;
	struct disk_exception de;

	/* presume the area is full */
	*full = 1;

	for (i = 0; i < ps->exceptions_per_area; i++) {
		read_exception(ps, i, &de);

		/*
		 * If the new_chunk is pointing at the start of
		 * the COW device, where the first metadata area
		 * is we know that we've hit the end of the
		 * exceptions.  Therefore the area is not full.
		 */
		if (de.new_chunk == 0LL) {
			ps->current_committed = i;
			*full = 0;
			break;
		}

		/*
		 * Keep track of the start of the free chunks.
		 */
		if (ps->next_free <= de.new_chunk)
			ps->next_free = de.new_chunk + 1;

		/*
		 * Otherwise we add the exception to the snapshot.
		 */
		r = callback(callback_context, de.old_chunk, de.new_chunk);
		if (r)
			return r;
	}

	return 0;
}

static int read_exceptions(struct pstore *ps,
			   int (*callback)(void *callback_context, chunk_t old,
					   chunk_t new),
			   void *callback_context)
{
	int r, full = 1;

	/*
	 * Keep reading chunks and inserting exceptions until
	 * we find a partially full area.
	 */
	for (ps->current_area = 0; full; ps->current_area++) {
		r = area_io(ps, READ);
		if (r)
			return r;

		r = insert_exceptions(ps, callback, callback_context, &full);
		if (r)
			return r;
	}

	ps->current_area--;

	return 0;
}
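
/*
 * Worked example (illustrative): with 1024 exceptions per area, a
 * snapshot holding 1500 committed exceptions reads area 0 completely
 * full, then stops in area 1 at index 476 on a zeroed record, leaving
 * current_area = 1 and current_committed = 476.
 */
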
static struct pstore *get_info(struct dm_exception_store *store)
{
	return (struct pstore *) store->context;
}

static void persistent_fraction_full(struct dm_exception_store *store,
				     sector_t *numerator, sector_t *denominator)
{
	*numerator = get_info(store)->next_free * store->chunk_size;
	*denominator = get_dev_size(store->cow->bdev);
}

static void persistent_dtr(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	destroy_workqueue(ps->metadata_wq);

	/* Created in read_header */
	if (ps->io_client)
		dm_io_client_destroy(ps->io_client);
	free_area(ps);

	/* Allocated in persistent_read_metadata */
	if (ps->callbacks)
		vfree(ps->callbacks);

	kfree(ps);
}

static int persistent_read_metadata(struct dm_exception_store *store,
				    int (*callback)(void *callback_context,
						    chunk_t old, chunk_t new),
				    void *callback_context)
{
	int r, uninitialized_var(new_snapshot);
	struct pstore *ps = get_info(store);

	/*
	 * Read the snapshot header.
	 */
	r = read_header(ps, &new_snapshot);
	if (r)
		return r;

	/*
	 * Now we know correct chunk_size, complete the initialisation.
	 */
	ps->exceptions_per_area = (ps->store->chunk_size << SECTOR_SHIFT) /
				  sizeof(struct disk_exception);
	ps->callbacks = dm_vcalloc(ps->exceptions_per_area,
				   sizeof(*ps->callbacks));
	if (!ps->callbacks)
		return -ENOMEM;

	/*
	 * Do we need to setup a new snapshot ?
	 */
	if (new_snapshot) {
		r = write_header(ps);
		if (r) {
			DMWARN("write_header failed");
			return r;
		}

		ps->current_area = 0;
		zero_memory_area(ps);
		r = zero_disk_area(ps, 0);
		if (r) {
			DMWARN("zero_disk_area(0) failed");
			return r;
		}
	} else {
		/*
		 * Sanity checks.
		 */
		if (ps->version != SNAPSHOT_DISK_VERSION) {
			DMWARN("unable to handle snapshot disk version %d",
			       ps->version);
			return -EINVAL;
		}

		/*
		 * Metadata are valid, but snapshot is invalidated
		 */
		if (!ps->valid)
			return 1;

		/*
		 * Read the metadata.
		 */
		r = read_exceptions(ps, callback, callback_context);
		if (r)
			return r;
	}

	return 0;
}

static int persistent_prepare_exception(struct dm_exception_store *store,
					struct dm_snap_exception *e)
{
	struct pstore *ps = get_info(store);
	uint32_t stride;
	chunk_t next_free;
	sector_t size = get_dev_size(store->cow->bdev);

	/* Is there enough room ? */
	if (size < ((ps->next_free + 1) * store->chunk_size))
		return -ENOSPC;

	e->new_chunk = ps->next_free;

	/*
	 * Move onto the next free pending, making sure to take
	 * into account the location of the metadata chunks.
	 */
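	/*
	 * Worked example (illustrative, 1024 exceptions per area):
	 * metadata areas occupy chunks 1, 1026, 2051, ... i.e. every
	 * chunk whose remainder modulo the 1025-chunk stride is 1, so
	 * when the incremented next_free lands on one of those chunks
	 * it is skipped with a second increment.
	 */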
	stride = (ps->exceptions_per_area + 1);
	next_free = ++ps->next_free;
	if (sector_div(next_free, stride) == 1)
		ps->next_free++;

	atomic_inc(&ps->pending_count);
	return 0;
}

static void persistent_commit_exception(struct dm_exception_store *store,
					struct dm_snap_exception *e,
					void (*callback) (void *, int success),
					void *callback_context)
{
	unsigned int i;
	struct pstore *ps = get_info(store);
	struct disk_exception de;
	struct commit_callback *cb;

	de.old_chunk = e->old_chunk;
	de.new_chunk = e->new_chunk;
	write_exception(ps, ps->current_committed++, &de);

	/*
	 * Add the callback to the back of the array.  This code
	 * is the only place where the callback array is
	 * manipulated, and we know that it will never be called
	 * multiple times concurrently.
	 */
	cb = ps->callbacks + ps->callback_count++;
	cb->callback = callback;
	cb->context = callback_context;

	/*
	 * If there are exceptions in flight and we have not yet
	 * filled this metadata area there's nothing more to do.
	 */
	if (!atomic_dec_and_test(&ps->pending_count) &&
	    (ps->current_committed != ps->exceptions_per_area))
		return;

	/*
	 * If we completely filled the current area, then wipe the next one.
	 */
	if ((ps->current_committed == ps->exceptions_per_area) &&
	    zero_disk_area(ps, ps->current_area + 1))
		ps->valid = 0;

	/*
	 * Commit exceptions to disk.
	 */
	if (ps->valid && area_io(ps, WRITE))
		ps->valid = 0;

	/*
	 * Advance to the next area if this one is full.
	 */
	if (ps->current_committed == ps->exceptions_per_area) {
		ps->current_committed = 0;
		ps->current_area++;
		zero_memory_area(ps);
	}

	for (i = 0; i < ps->callback_count; i++) {
		cb = ps->callbacks + i;
		cb->callback(cb->context, ps->valid);
	}

	ps->callback_count = 0;
}
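
/*
 * Illustrative sequence: if exceptions A, B and C are pending in the
 * same area, the first two commits merely queue their callbacks; the
 * third drops pending_count to zero, writes the area out once, and
 * then completes all three callbacks together.
 */
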
static void persistent_drop_snapshot(struct dm_exception_store *store)
{
	struct pstore *ps = get_info(store);

	ps->valid = 0;
	if (write_header(ps))
		DMWARN("write header failed");
}

static int persistent_ctr(struct dm_exception_store *store,
			  unsigned argc, char **argv)
{
	struct pstore *ps;

	/* allocate the pstore */
	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		return -ENOMEM;

	ps->store = store;
	ps->valid = 1;
	ps->version = SNAPSHOT_DISK_VERSION;
	ps->area = NULL;
	ps->next_free = 2;	/* skipping the header and first area */
	ps->current_committed = 0;

	ps->callback_count = 0;
	atomic_set(&ps->pending_count, 0);
	ps->callbacks = NULL;

	ps->metadata_wq = create_singlethread_workqueue("ksnaphd");
	if (!ps->metadata_wq) {
		kfree(ps);
		DMERR("couldn't start header metadata update thread");
		return -ENOMEM;
	}

	store->context = ps;

	return 0;
}

static unsigned persistent_status(struct dm_exception_store *store,
				  status_type_t status, char *result,
				  unsigned maxlen)
{
	unsigned sz = 0;

	switch (status) {
	case STATUSTYPE_INFO:
		break;
	case STATUSTYPE_TABLE:
		DMEMIT(" %s P %llu", store->cow->name,
		       (unsigned long long)store->chunk_size);
	}

	return sz;
}

static struct dm_exception_store_type _persistent_type = {
	.name = "persistent",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.drop_snapshot = persistent_drop_snapshot,
	.fraction_full = persistent_fraction_full,
	.status = persistent_status,
};

static struct dm_exception_store_type _persistent_compat_type = {
	.name = "P",
	.module = THIS_MODULE,
	.ctr = persistent_ctr,
	.dtr = persistent_dtr,
	.read_metadata = persistent_read_metadata,
	.prepare_exception = persistent_prepare_exception,
	.commit_exception = persistent_commit_exception,
	.drop_snapshot = persistent_drop_snapshot,
	.fraction_full = persistent_fraction_full,
	.status = persistent_status,
};

int dm_persistent_snapshot_init(void)
{
	int r;

	r = dm_exception_store_type_register(&_persistent_type);
	if (r) {
		DMERR("Unable to register persistent exception store type");
		return r;
	}

	r = dm_exception_store_type_register(&_persistent_compat_type);
	if (r) {
		DMERR("Unable to register old-style persistent exception "
		      "store type");
		dm_exception_store_type_unregister(&_persistent_type);
		return r;
	}

	return r;
}

void dm_persistent_snapshot_exit(void)
{
	dm_exception_store_type_unregister(&_persistent_type);
	dm_exception_store_type_unregister(&_persistent_compat_type);
}