block/backup.c

/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define SLICE_TIME 100000000ULL /* ns */

typedef struct BackupBlockJob {
    BlockJob common;
    BlockBackend *target;
    /* bitmap for sync=incremental */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    RateLimit limit;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t bytes_read;
    unsigned long *done_bitmap;
    int64_t cluster_size;
    bool compress;
    NotifierWithReturn before_write;
    QLIST_HEAD(, CowRequest) inflight_reqs;
} BackupBlockJob;
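
/*
 * In-flight copy-on-write requests are tracked per byte range in
 * inflight_reqs so that overlapping requests wait for each other, while
 * flush_rwlock is held for reading by every backup_do_cow() call so that
 * job completion can wait for all pending copies by taking it for writing.
 */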

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start_byte && start < req->end_byte) {
                qemu_co_queue_wait(&req->wait_queue, NULL);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start_byte = start;
    req->end_byte = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
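
/*
 * Copy the clusters that cover [offset, offset + bytes) from the source to
 * the target, skipping clusters already marked in done_bitmap.  Reads go
 * through a bounce buffer; all-zero clusters become zero writes on the
 * target.
 */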

static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t offset, uint64_t bytes,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    BlockBackend *blk = job->common.blk;
    CowRequest cow_request;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    void *bounce_buffer = NULL;
    int ret = 0;
    int64_t start, end; /* bytes */
    int n; /* bytes */

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);

    trace_backup_do_cow_enter(job, start, offset, bytes);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    for (; start < end; start += job->cluster_size) {
        if (test_bit(start / job->cluster_size, job->done_bitmap)) {
            trace_backup_do_cow_skip(job, start);
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start);

        n = MIN(job->cluster_size, job->common.len - start);

        if (!bounce_buffer) {
            bounce_buffer = blk_blockalign(blk, job->cluster_size);
        }
        iov.iov_base = bounce_buffer;
        iov.iov_len = n;
        qemu_iovec_init_external(&bounce_qiov, &iov, 1);

        ret = blk_co_preadv(blk, start, bounce_qiov.size, &bounce_qiov,
                            is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
        if (ret < 0) {
            trace_backup_do_cow_read_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = true;
            }
            goto out;
        }

        if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
            ret = blk_co_pwrite_zeroes(job->target, start,
                                       bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
        } else {
            ret = blk_co_pwritev(job->target, start,
                                 bounce_qiov.size, &bounce_qiov,
                                 job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
        }
        if (ret < 0) {
            trace_backup_do_cow_write_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = false;
            }
            goto out;
        }

        set_bit(start / job->cluster_size, job->done_bitmap);

        /* Publish progress, guest I/O counts as progress too.  Note that the
         * offset field is an opaque progress value, it is not a disk offset.
         */
        job->bytes_read += n;
        job->common.offset += n;
    }

out:
    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, offset, bytes, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}
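
/*
 * Called from the source's before-write notifier: copy the affected
 * clusters to the target before the guest write overwrites them.
 */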

static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BackupBlockJob *job = container_of(notifier, BackupBlockJob, before_write);
    BdrvTrackedRequest *req = opaque;

    assert(req->bs == blk_bs(job->common.blk));
    assert(QEMU_IS_ALIGNED(req->offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(req->bytes, BDRV_SECTOR_SIZE));

    return backup_do_cow(job, req->offset, req->bytes, NULL, true);
}

static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
}
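
/*
 * On success the frozen sync_bitmap is abdicated in favour of its
 * successor (which tracked writes made during the backup); on failure or
 * cancellation the successor is merged back so no dirty bits are lost.
 */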

static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = blk_bs(job->common.blk);

    if (ret < 0 || block_job_is_cancelled(&job->common)) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine, delete this bitmap and install the backup. */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
        assert(bm);
    }
}

static void backup_commit(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static void backup_clean(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    assert(s->target);
    blk_unref(s->target);
    s->target = NULL;
}

static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    blk_set_aio_context(s->target, aio_context);
}

void backup_do_checkpoint(BlockJob *job, Error **errp)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t len;

    assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);

    if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
        error_setg(errp, "The backup job only supports block checkpoint in"
                   " sync=none mode");
        return;
    }

    len = DIV_ROUND_UP(backup_job->common.len, backup_job->cluster_size);
    bitmap_zero(backup_job->done_bitmap, len);
}

void backup_wait_for_overlapping_requests(BlockJob *job, int64_t offset,
                                          uint64_t bytes)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t start, end;

    assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);

    start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
    end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
    wait_for_overlapping_requests(backup_job, start, end);
}

void backup_cow_request_begin(CowRequest *req, BlockJob *job,
                              int64_t offset, uint64_t bytes)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t start, end;

    assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);

    start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
    end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
    cow_request_begin(req, backup_job, start, end);
}

void backup_cow_request_end(CowRequest *req)
{
    cow_request_end(req);
}

static void backup_drain(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of backup_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}

typedef struct {
    int ret;
} BackupCompleteData;

static void backup_complete(BlockJob *job, void *opaque)
{
    BackupCompleteData *data = opaque;

    block_job_completed(job, data->ret);
    g_free(data);
}
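
/*
 * Pause point for the copy loops: applies the configured rate limit (or
 * just yields briefly) and reports whether the job has been cancelled.
 */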

static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    /* We need to yield so that bdrv_drain_all() returns.
     * (without it, the VM does not reboot)
     */
    if (job->common.speed) {
        uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
                                                      job->bytes_read);
        job->bytes_read = 0;
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
    } else {
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
    }

    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    return false;
}
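
/*
 * sync=incremental: walk the dirty bitmap and copy only the clusters that
 * intersect a dirty range, retrying failed clusters according to the
 * configured error action.
 */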

static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    bool error_is_read;
    int ret = 0;
    int clusters_per_iter;
    uint32_t granularity;
    int64_t offset;
    int64_t cluster;
    int64_t end;
    int64_t last_cluster = -1;
    BdrvDirtyBitmapIter *dbi;

    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
    dbi = bdrv_dirty_iter_new(job->sync_bitmap, 0);

    /* Find the next dirty sector(s) */
    while ((offset = bdrv_dirty_iter_next(dbi) * BDRV_SECTOR_SIZE) >= 0) {
        cluster = offset / job->cluster_size;

        /* Fake progress updates for any clusters we skipped */
        if (cluster != last_cluster + 1) {
            job->common.offset += ((cluster - last_cluster - 1) *
                                   job->cluster_size);
        }

        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
            do {
                if (yield_and_check(job)) {
                    goto out;
                }
                ret = backup_do_cow(job, cluster * job->cluster_size,
                                    job->cluster_size, &error_is_read,
                                    false);
                if ((ret < 0) &&
                    backup_error_action(job, error_is_read, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto out;
                }
            } while (ret < 0);
        }

        /* If the bitmap granularity is smaller than the backup granularity,
         * we need to advance the iterator pointer to the next cluster. */
        if (granularity < job->cluster_size) {
            bdrv_set_dirty_iter(dbi,
                                cluster * job->cluster_size / BDRV_SECTOR_SIZE);
        }

        last_cluster = cluster - 1;
    }

    /* Play some final catchup with the progress meter */
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
    if (last_cluster + 1 < end) {
        job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
    }

out:
    bdrv_dirty_iter_free(dbi);
    return ret;
}
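
/*
 * Main job coroutine.  sync=none only services CoW requests from the
 * write notifier; sync=incremental follows the dirty bitmap; sync=full
 * and sync=top scan the whole device, with sync=top skipping clusters
 * that are not allocated in the topmost image.
 */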

static void coroutine_fn backup_run(void *opaque)
{
    BackupBlockJob *job = opaque;
    BackupCompleteData *data;
    BlockDriverState *bs = blk_bs(job->common.blk);
    int64_t offset;
    int ret = 0;

    QLIST_INIT(&job->inflight_reqs);
    qemu_co_rwlock_init(&job->flush_rwlock);

    job->done_bitmap = bitmap_new(DIV_ROUND_UP(job->common.len,
                                               job->cluster_size));

    job->before_write.notify = backup_before_write_notify;
    bdrv_add_before_write_notifier(bs, &job->before_write);

    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
        while (!block_job_is_cancelled(&job->common)) {
            /* Yield until the job is cancelled.  We just let our before_write
             * notify callback service CoW requests. */
            block_job_yield(&job->common);
        }
    } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        ret = backup_run_incremental(job);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (offset = 0; offset < job->common.len;
             offset += job->cluster_size) {
            bool error_is_read;
            int alloced = 0;

            if (yield_and_check(job)) {
                break;
            }

            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
                int i;
                int64_t n;

                /* Check to see if these blocks are already in the
                 * backing file. */

                for (i = 0; i < job->cluster_size;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
                     * For that reason we must verify each sector in the
                     * backup cluster length.  We end up copying more than
                     * needed but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs, offset + i,
                                          job->cluster_size - i, &n);
                    i += n;

                    if (alloced || n == 0) {
                        break;
                    }
                }

                /* If the above loop never found any sectors that are in
                 * the topmost image, skip this backup. */
                if (alloced == 0) {
                    continue;
                }
            }
            /* In FULL sync mode we copy the whole drive. */
            if (alloced < 0) {
                ret = alloced;
            } else {
                ret = backup_do_cow(job, offset, job->cluster_size,
                                    &error_is_read, false);
            }
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(job, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    offset -= job->cluster_size;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&job->before_write);

    /* Wait until pending backup_do_cow() calls have completed. */
    qemu_co_rwlock_wrlock(&job->flush_rwlock);
    qemu_co_rwlock_unlock(&job->flush_rwlock);
    g_free(job->done_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&job->common, backup_complete, data);
}

static const BlockJobDriver backup_job_driver = {
    .instance_size          = sizeof(BackupBlockJob),
    .job_type               = BLOCK_JOB_TYPE_BACKUP,
    .start                  = backup_run,
    .set_speed              = backup_set_speed,
    .commit                 = backup_commit,
    .abort                  = backup_abort,
    .clean                  = backup_clean,
    .attached_aio_context   = backup_attached_aio_context,
    .drain                  = backup_drain,
};
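
/*
 * Validate the source/target pair, set up the job and its target
 * BlockBackend, and pick a cluster size: at least
 * BACKUP_CLUSTER_SIZE_DEFAULT, raised to the target's cluster size when
 * that is known, so the copy granularity never undercuts the target.
 */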

BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, int64_t speed,
                  MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                  bool compress,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  int creation_flags,
                  BlockCompletionFunc *cb, void *opaque,
                  BlockJobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    BackupBlockJob *job = NULL;
    int ret;

    assert(bs);
    assert(target);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return NULL;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return NULL;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
        error_setg(errp, "Compression is not supported for this drive %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return NULL;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return NULL;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"incremental\" sync mode");
            return NULL;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return NULL;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_run, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_lookup[sync_mode]);
        return NULL;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    /* job->common.len is fixed, so we can't allow resize */
    job = block_job_create(job_id, &backup_job_driver, bs,
                           BLK_PERM_CONSISTENT_READ,
                           BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                           BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD,
                           speed, creation_flags, cb, opaque, errp);
    if (!job) {
        goto error;
    }

    /* The target must match the source in size, so no resize here either */
    job->target = blk_new(BLK_PERM_WRITE,
                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                          BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
    ret = blk_insert_bs(job->target, target, errp);
    if (ret < 0) {
        goto error;
    }

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;
    job->compress = compress;

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible. */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target->backing) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BACKUP_CLUSTER_SIZE_DEFAULT);
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }

    /* Required permissions are already taken with target's blk_new() */
    block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);
    job->common.len = len;
    block_job_txn_add_job(txn, &job->common);

    return &job->common;

error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
    if (job) {
        backup_clean(&job->common);
        block_job_early_fail(&job->common);
    }

    return NULL;
}