block/backup: refactor and tolerate unallocated cluster skipping
[qemu/ar7.git] / block / backup.c

/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
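
/* 1 << 16 bytes is 64 KiB; backup_job_create() below raises the job's actual
 * cluster size to the target's cluster size whenever that is known and
 * larger. */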

typedef struct CowRequest {
    int64_t start_byte;
    int64_t end_byte;
    QLIST_ENTRY(CowRequest) list;
    CoQueue wait_queue; /* coroutines blocked on this request */
} CowRequest;
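
/*
 * Every in-progress copy-on-write operation is published as a CowRequest on
 * the job's inflight_reqs list; requests for overlapping byte ranges block on
 * wait_queue until the earlier copy finishes (see
 * wait_for_overlapping_requests() and cow_request_end() below).
 */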

typedef struct BackupBlockJob {
    BlockJob common;
    BlockBackend *target;
    /* bitmap for sync=incremental */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t len;
    uint64_t bytes_read;
    int64_t cluster_size;
    bool compress;
    NotifierWithReturn before_write;
    QLIST_HEAD(, CowRequest) inflight_reqs;
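
    /* Set bits in copy_bitmap mark clusters that still need to be copied; a
     * cluster's bit is cleared right before it is copied and set again if the
     * copy fails.  use_copy_range starts out true and is dropped for good
     * after the first failed blk_co_copy_range() call, and copy_range_size
     * caps how many bytes a single offloaded copy may move. */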
    HBitmap *copy_bitmap;
    bool use_copy_range;
    int64_t copy_range_size;

    bool serialize_target_writes;
} BackupBlockJob;

static const BlockJobDriver backup_job_driver;

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start_byte && start < req->end_byte) {
                qemu_co_queue_wait(&req->wait_queue, NULL);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start_byte = start;
    req->end_byte = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/* Copy range to target with a bounce buffer and return the bytes copied. If
 * an error occurs, return a negative error number. */
static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
                                                      int64_t start,
                                                      int64_t end,
                                                      bool is_write_notifier,
                                                      bool *error_is_read,
                                                      void **bounce_buffer)
{
    int ret;
    BlockBackend *blk = job->common.blk;
    int nbytes;
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
    int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;

    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
    hbitmap_reset(job->copy_bitmap, start, job->cluster_size);
    nbytes = MIN(job->cluster_size, job->len - start);
    if (!*bounce_buffer) {
        *bounce_buffer = blk_blockalign(blk, job->cluster_size);
    }

    ret = blk_co_pread(blk, start, nbytes, *bounce_buffer, read_flags);
    if (ret < 0) {
        trace_backup_do_cow_read_fail(job, start, ret);
        if (error_is_read) {
            *error_is_read = true;
        }
        goto fail;
    }

    if (buffer_is_zero(*bounce_buffer, nbytes)) {
        ret = blk_co_pwrite_zeroes(job->target, start,
                                   nbytes, write_flags | BDRV_REQ_MAY_UNMAP);
    } else {
        ret = blk_co_pwrite(job->target, start,
                            nbytes, *bounce_buffer, write_flags |
                            (job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0));
    }
    if (ret < 0) {
        trace_backup_do_cow_write_fail(job, start, ret);
        if (error_is_read) {
            *error_is_read = false;
        }
        goto fail;
    }

    return nbytes;
fail:
    hbitmap_set(job->copy_bitmap, start, job->cluster_size);
    return ret;
}

/* Copy range to target and return the bytes copied. If an error occurs,
 * return a negative error number. */
static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
                                                int64_t start,
                                                int64_t end,
                                                bool is_write_notifier)
{
    int ret;
    int nr_clusters;
    BlockBackend *blk = job->common.blk;
    int nbytes;
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
    int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;

    assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
    nbytes = MIN(job->copy_range_size, end - start);
    nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
    hbitmap_reset(job->copy_bitmap, start, job->cluster_size * nr_clusters);
    ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
                            read_flags, write_flags);
    if (ret < 0) {
        trace_backup_do_cow_copy_range_fail(job, start, ret);
        hbitmap_set(job->copy_bitmap, start, job->cluster_size * nr_clusters);
        return ret;
    }

    return nbytes;
}
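
/*
 * When the offloaded copy fails, backup_do_cow() clears use_copy_range and
 * falls back to backup_cow_with_bounce_buffer() for the same clusters, so a
 * single copy_range failure only degrades the job to buffered copying and
 * loses no data.
 */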

static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t offset, uint64_t bytes,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    CowRequest cow_request;
    int ret = 0;
    int64_t start, end; /* bytes */
    void *bounce_buffer = NULL;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);
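
    /* The copied region is widened to cluster boundaries: with a 64 KiB
     * cluster size, a 4 KiB guest write at offset 70 KiB becomes the byte
     * range [64 KiB, 128 KiB). */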

    trace_backup_do_cow_enter(job, start, offset, bytes);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    while (start < end) {
        if (!hbitmap_get(job->copy_bitmap, start)) {
            trace_backup_do_cow_skip(job, start);
            start += job->cluster_size;
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start);

        if (job->use_copy_range) {
            ret = backup_cow_with_offload(job, start, end, is_write_notifier);
            if (ret < 0) {
                job->use_copy_range = false;
            }
        }
        if (!job->use_copy_range) {
            ret = backup_cow_with_bounce_buffer(job, start, end,
                                                is_write_notifier,
                                                error_is_read, &bounce_buffer);
        }
        if (ret < 0) {
            break;
        }

        /* Publish progress; guest I/O counts as progress too.  Note that the
         * offset field is an opaque progress value, it is not a disk offset.
         */
        start += ret;
        job->bytes_read += ret;
        job_progress_update(&job->common.job, ret);
        ret = 0;
    }

    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, offset, bytes, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}

static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BackupBlockJob *job = container_of(notifier, BackupBlockJob, before_write);
    BdrvTrackedRequest *req = opaque;

    assert(req->bs == blk_bs(job->common.blk));
    assert(QEMU_IS_ALIGNED(req->offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(req->bytes, BDRV_SECTOR_SIZE));

    return backup_do_cow(job, req->offset, req->bytes, NULL, true);
}
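
/*
 * The notifier fires before every write to the source node, so the old
 * contents of the affected clusters are copied to the target first; a
 * negative return value from here propagates back and fails the guest write.
 */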

static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = blk_bs(job->common.blk);

    if (ret < 0) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine, delete this bitmap and install the backup. */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
        assert(bm);
    }
}

static void backup_commit(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static void backup_clean(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    assert(s->target);
    blk_unref(s->target);
    s->target = NULL;

    if (s->copy_bitmap) {
        hbitmap_free(s->copy_bitmap);
        s->copy_bitmap = NULL;
    }
}

void backup_do_checkpoint(BlockJob *job, Error **errp)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);

    assert(block_job_driver(job) == &backup_job_driver);

    if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
        error_setg(errp, "The backup job only supports block checkpoint in"
                   " sync=none mode");
        return;
    }

    hbitmap_set(backup_job->copy_bitmap, 0, backup_job->len);
}

static void backup_drain(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of backup_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}

static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    uint64_t delay_ns;

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    /* We need to yield even for delay_ns = 0 so that bdrv_drain_all() can
     * return. Without a yield, the VM would not reboot. */
    delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read);
    job->bytes_read = 0;
    job_sleep_ns(&job->common.job, delay_ns);

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    return false;
}

static bool bdrv_is_unallocated_range(BlockDriverState *bs,
                                      int64_t offset, int64_t bytes)
{
    int64_t end = offset + bytes;

    while (offset < end && !bdrv_is_allocated(bs, offset, bytes, &bytes)) {
        if (bytes == 0) {
            return true;
        }
        offset += bytes;
        bytes = end - offset;
    }

    return offset >= end;
}
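
/*
 * bdrv_is_unallocated_range() returns true only if the whole [offset,
 * offset + bytes) range is unallocated in bs; an allocated byte (or an error
 * from bdrv_is_allocated()) ends the loop early and the function returns
 * false.  backup_run() uses it in sync=top mode to skip clusters that exist
 * only in the backing chain.
 */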

static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    int ret;
    bool error_is_read;
    int64_t offset;
    HBitmapIter hbi;

    hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
    while ((offset = hbitmap_iter_next(&hbi)) != -1) {
        do {
            if (yield_and_check(job)) {
                return 0;
            }
            ret = backup_do_cow(job, offset,
                                job->cluster_size, &error_is_read, false);
            if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
                           BLOCK_ERROR_ACTION_REPORT)
            {
                return ret;
            }
        } while (ret < 0);
    }

    return 0;
}
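
/*
 * The inner do/while loop above retries a failing cluster until either the
 * copy succeeds or the configured error action is BLOCK_ERROR_ACTION_REPORT,
 * in which case the job fails with that error.
 */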

/* init copy_bitmap from sync_bitmap */
static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
{
    uint64_t offset = 0;
    uint64_t bytes = job->len;

    while (bdrv_dirty_bitmap_next_dirty_area(job->sync_bitmap,
                                             &offset, &bytes))
    {
        hbitmap_set(job->copy_bitmap, offset, bytes);

        offset += bytes;
        if (offset >= job->len) {
            break;
        }
        bytes = job->len - offset;
    }

    /* TODO job_progress_set_remaining() would make more sense */
    job_progress_update(&job->common.job,
        job->len - hbitmap_count(job->copy_bitmap));
}
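
/*
 * The job_progress_update() call above credits every cluster that is clean in
 * sync_bitmap as already done, so the remaining work reported to the user
 * matches the amount of dirty data that still has to be copied.
 */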

static int coroutine_fn backup_run(Job *job, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    BlockDriverState *bs = blk_bs(s->common.blk);
    int64_t offset;
    int ret = 0;

    QLIST_INIT(&s->inflight_reqs);
    qemu_co_rwlock_init(&s->flush_rwlock);

    job_progress_set_remaining(job, s->len);

    if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        backup_incremental_init_copy_bitmap(s);
    } else {
        hbitmap_set(s->copy_bitmap, 0, s->len);
    }

    s->before_write.notify = backup_before_write_notify;
    bdrv_add_before_write_notifier(bs, &s->before_write);

    if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
        /* All bits are set in copy_bitmap to allow any cluster to be copied.
         * This does not actually require them to be copied. */
        while (!job_is_cancelled(job)) {
            /* Yield until the job is cancelled.  We just let our before_write
             * notify callback service CoW requests. */
            job_yield(job);
        }
    } else if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        ret = backup_run_incremental(s);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (offset = 0; offset < s->len;
             offset += s->cluster_size) {
            bool error_is_read;

            if (yield_and_check(s)) {
                break;
            }

            if (s->sync_mode == MIRROR_SYNC_MODE_TOP &&
                bdrv_is_unallocated_range(bs, offset, s->cluster_size))
            {
                continue;
            }

            ret = backup_do_cow(s, offset, s->cluster_size,
                                &error_is_read, false);
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(s, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    offset -= s->cluster_size;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&s->before_write);

    /* wait until pending backup_do_cow() calls have completed */
    qemu_co_rwlock_wrlock(&s->flush_rwlock);
    qemu_co_rwlock_unlock(&s->flush_rwlock);

    return ret;
}

static const BlockJobDriver backup_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(BackupBlockJob),
        .job_type               = JOB_TYPE_BACKUP,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .run                    = backup_run,
        .commit                 = backup_commit,
        .abort                  = backup_abort,
        .clean                  = backup_clean,
    },
    .drain                  = backup_drain,
};

BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, int64_t speed,
                  MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                  bool compress,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  int creation_flags,
                  BlockCompletionFunc *cb, void *opaque,
                  JobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    BackupBlockJob *job = NULL;
    int ret;

    assert(bs);
    assert(target);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return NULL;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return NULL;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
        error_setg(errp, "Compression is not supported for this drive %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return NULL;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return NULL;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"incremental\" sync mode");
            return NULL;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return NULL;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_run, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_str(sync_mode));
        return NULL;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    /* job->len is fixed, so we can't allow resize */
    job = block_job_create(job_id, &backup_job_driver, txn, bs,
                           BLK_PERM_CONSISTENT_READ,
                           BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                           BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD,
                           speed, creation_flags, cb, opaque, errp);
    if (!job) {
        goto error;
    }

    /* The target must match the source in size, so no resize here either */
    job->target = blk_new(BLK_PERM_WRITE,
                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                          BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
    ret = blk_insert_bs(job->target, target, errp);
    if (ret < 0) {
        goto error;
    }

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;
    job->compress = compress;

    /* Detect image-fleecing (and similar) schemes */
    job->serialize_target_writes = bdrv_chain_contains(target, bs);
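
    /* In a fleecing setup the target is a temporary image backed by the
     * source, so BDRV_REQ_SERIALISING is applied to the target writes (see
     * write_flags in backup_cow_with_bounce_buffer() and
     * backup_cow_with_offload()) to keep the copy-on-write writes from racing
     * with overlapping requests already in flight on the target. */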

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible. */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target->backing) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BACKUP_CLUSTER_SIZE_DEFAULT);
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }

    job->copy_bitmap = hbitmap_alloc(len, ctz32(job->cluster_size));
    job->use_copy_range = true;
    job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
                                        blk_get_max_transfer(job->target));
    job->copy_range_size = MAX(job->cluster_size,
                               QEMU_ALIGN_UP(job->copy_range_size,
                                             job->cluster_size));
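
    /* For example, with a 64 KiB cluster size and a 1 MiB max_transfer on
     * both source and target, copy_range_size ends up as 1 MiB, i.e. up to
     * 16 clusters can be moved by a single blk_co_copy_range() call. */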

    /* Required permissions are already taken with target's blk_new() */
    block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);
    job->len = len;

    return &job->common;

 error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
    if (job) {
        backup_clean(&job->common.job);
        job_early_fail(&job->common.job);
    }

    return NULL;
}