/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define SLICE_TIME 100000000ULL /* ns */
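
/*
 * With the default 64 KiB cluster (1 << 16 bytes) and 512-byte sectors,
 * one backup cluster spans 65536 / 512 = 128 sectors; done_bitmap below
 * tracks copy completion at this cluster granularity.
 */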
typedef struct CowRequest {
    int64_t start;
    int64_t end;
    QLIST_ENTRY(CowRequest) list;
    CoQueue wait_queue; /* coroutines blocked on this request */
} CowRequest;

typedef struct BackupBlockJob {
    BlockJob common;
    BlockDriverState *target;
    /* bitmap for sync=incremental */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    RateLimit limit;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t sectors_read;
    unsigned long *done_bitmap;
    int64_t cluster_size;
    NotifierWithReturn before_write;
    QLIST_HEAD(, CowRequest) inflight_reqs;
} BackupBlockJob;

/* Size of a cluster in sectors, instead of bytes. */
static inline int64_t cluster_size_sectors(BackupBlockJob *job)
{
    return job->cluster_size / BDRV_SECTOR_SIZE;
}

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start && start < req->end) {
                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start = start;
    req->end = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
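
/*
 * Together these helpers form a small range lock: a copier first waits for
 * any overlapping in-flight request, then publishes its own, e.g.
 *
 *     wait_for_overlapping_requests(job, start, end);
 *     cow_request_begin(&cow_request, job, start, end);
 *     ... copy clusters [start, end) ...
 *     cow_request_end(&cow_request);
 *
 * so overlapping cluster ranges are never copied concurrently.
 */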
static int coroutine_fn backup_do_cow(BlockDriverState *bs,
                                      BackupBlockJob *job,
                                      int64_t sector_num, int nb_sectors,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    CowRequest cow_request;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    void *bounce_buffer = NULL;
    int ret = 0;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int64_t start, end;
    int n;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);

    trace_backup_do_cow_enter(job, start, sector_num, nb_sectors);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    for (; start < end; start++) {
        if (test_bit(start, job->done_bitmap)) {
            trace_backup_do_cow_skip(job, start);
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start);

        n = MIN(sectors_per_cluster,
                job->common.len / BDRV_SECTOR_SIZE -
                start * sectors_per_cluster);

        if (!bounce_buffer) {
            bounce_buffer = qemu_blockalign(bs, job->cluster_size);
        }
        iov.iov_base = bounce_buffer;
        iov.iov_len = n * BDRV_SECTOR_SIZE;
        qemu_iovec_init_external(&bounce_qiov, &iov, 1);

        if (is_write_notifier) {
            ret = bdrv_co_readv_no_serialising(bs,
                                               start * sectors_per_cluster,
                                               n, &bounce_qiov);
        } else {
            ret = bdrv_co_readv(bs, start * sectors_per_cluster, n,
                                &bounce_qiov);
        }
        if (ret < 0) {
            trace_backup_do_cow_read_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = true;
            }
            goto out;
        }

        if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
            ret = bdrv_co_write_zeroes(job->target,
                                       start * sectors_per_cluster,
                                       n, BDRV_REQ_MAY_UNMAP);
        } else {
            ret = bdrv_co_writev(job->target,
                                 start * sectors_per_cluster, n,
                                 &bounce_qiov);
        }
        if (ret < 0) {
            trace_backup_do_cow_write_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = false;
            }
            goto out;
        }

        set_bit(start, job->done_bitmap);

        /* Publish progress, guest I/O counts as progress too.  Note that the
         * offset field is an opaque progress value, it is not a disk offset.
         */
        job->sectors_read += n;
        job->common.offset += n * BDRV_SECTOR_SIZE;
    }

out:
    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, sector_num, nb_sectors, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}
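
/*
 * Worked example of the arithmetic above, with the default 128-sector
 * cluster: a request at sector_num = 1000 for nb_sectors = 300 gives
 * start = 1000 / 128 = 7 and end = DIV_ROUND_UP(1300, 128) = 11, so
 * clusters 7 through 10 are (at most) read into the bounce buffer and
 * written to the target.
 */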
static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BackupBlockJob *job = container_of(notifier, BackupBlockJob, before_write);
    BdrvTrackedRequest *req = opaque;
    int64_t sector_num = req->offset >> BDRV_SECTOR_BITS;
    int nb_sectors = req->bytes >> BDRV_SECTOR_BITS;

    assert((req->offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((req->bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    return backup_do_cow(req->bs, job, sector_num,
                         nb_sectors, NULL, true);
}
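
/*
 * Note: this notifier fires before the tracked guest request is submitted,
 * so the old data reaches the target before the guest can overwrite it;
 * returning backup_do_cow()'s status means a failed copy also fails the
 * intercepted guest write. error_is_read is passed as NULL since the
 * read/write distinction is only consumed by the job's own error handling.
 */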
static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = job->common.bs;

    if (ret < 0 || block_job_is_cancelled(&job->common)) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine, delete this bitmap and install the backup. */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
        assert(bm);
    }
}
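
/*
 * Successor bitmap lifecycle (as set up by backup_start() below): creating
 * a successor freezes the user-visible bitmap while writes made during the
 * backup accumulate in the successor. On error or cancel the successor is
 * merged back, so no dirty information is lost; on success the frozen
 * bitmap abdicates and the successor, holding only the writes made during
 * the backup, takes its place.
 */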
static void backup_commit(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static const BlockJobDriver backup_job_driver = {
    .instance_size  = sizeof(BackupBlockJob),
    .job_type       = BLOCK_JOB_TYPE_BACKUP,
    .set_speed      = backup_set_speed,
    .commit         = backup_commit,
    .abort          = backup_abort,
};

static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}

typedef struct {
    int ret;
} BackupCompleteData;

static void backup_complete(BlockJob *job, void *opaque)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    BackupCompleteData *data = opaque;

    bdrv_unref(s->target);

    block_job_completed(job, data->ret);
    g_free(data);
}

static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    /* We need to yield so that bdrv_drain_all() returns.
     * (without it, the VM does not reboot)
     */
    if (job->common.speed) {
        uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
                                                      job->sectors_read);
        job->sectors_read = 0;
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
    } else {
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
    }

    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    return false;
}

static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    bool error_is_read;
    int ret = 0;
    int clusters_per_iter;
    uint32_t granularity;
    int64_t sector;
    int64_t cluster;
    int64_t end;
    int64_t last_cluster = -1;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    BlockDriverState *bs = job->common.bs;
    HBitmapIter hbi;

    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
    bdrv_dirty_iter_init(job->sync_bitmap, &hbi);

    /* Find the next dirty sector(s) */
    while ((sector = hbitmap_iter_next(&hbi)) != -1) {
        cluster = sector / sectors_per_cluster;

        /* Fake progress updates for any clusters we skipped */
        if (cluster != last_cluster + 1) {
            job->common.offset += ((cluster - last_cluster - 1) *
                                   job->cluster_size);
        }

        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
            do {
                if (yield_and_check(job)) {
                    return ret;
                }
                ret = backup_do_cow(bs, job, cluster * sectors_per_cluster,
                                    sectors_per_cluster, &error_is_read,
                                    false);
                if ((ret < 0) &&
                    backup_error_action(job, error_is_read, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    return ret;
                }
            } while (ret < 0);
        }

        /* If the bitmap granularity is smaller than the backup granularity,
         * we need to advance the iterator pointer to the next cluster. */
        if (granularity < job->cluster_size) {
            bdrv_set_dirty_iter(&hbi, cluster * sectors_per_cluster);
        }

        last_cluster = cluster - 1;
    }

    /* Play some final catchup with the progress meter */
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
    if (last_cluster + 1 < end) {
        job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
    }

    return ret;
}
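
/*
 * Example of the iteration granularity above: a 128 KiB bitmap granularity
 * with the 64 KiB default cluster gives clusters_per_iter = 2, so every
 * dirty bit found by hbitmap_iter_next() copies two backup clusters.
 * Conversely, when the granularity is smaller than the cluster size, the
 * iterator must be advanced manually past the cluster just copied.
 */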
static void coroutine_fn backup_run(void *opaque)
{
    BackupBlockJob *job = opaque;
    BackupCompleteData *data;
    BlockDriverState *bs = job->common.bs;
    BlockDriverState *target = job->target;
    int64_t start, end;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int ret = 0;

    QLIST_INIT(&job->inflight_reqs);
    qemu_co_rwlock_init(&job->flush_rwlock);

    start = 0;
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);

    job->done_bitmap = bitmap_new(end);

    job->before_write.notify = backup_before_write_notify;
    bdrv_add_before_write_notifier(bs, &job->before_write);

    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
        while (!block_job_is_cancelled(&job->common)) {
            /* Yield until the job is cancelled.  We just let our before_write
             * notify callback service CoW requests. */
            job->common.busy = false;
            qemu_coroutine_yield();
            job->common.busy = true;
        }
    } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        ret = backup_run_incremental(job);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (; start < end; start++) {
            bool error_is_read;
            if (yield_and_check(job)) {
                break;
            }

            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
                int i, n;
                int alloced = 0;

                /* Check to see if these blocks are already in the
                 * backing file. */
                for (i = 0; i < sectors_per_cluster;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
                     * For that reason we must verify each sector in the
                     * backup cluster length.  We end up copying more than
                     * needed but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs,
                                          start * sectors_per_cluster + i,
                                          sectors_per_cluster - i, &n);
                    i += n;

                    if (alloced == 1 || n == 0) {
                        break;
                    }
                }

                /* If the above loop never found any sectors that are in
                 * the topmost image, skip this backup. */
                if (alloced == 0) {
                    continue;
                }
            }
            /* In FULL sync mode we copy the whole drive. */
            ret = backup_do_cow(bs, job, start * sectors_per_cluster,
                                sectors_per_cluster, &error_is_read, false);
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(job, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    start--;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&job->before_write);

    /* wait until pending backup_do_cow() calls have completed */
    qemu_co_rwlock_wrlock(&job->flush_rwlock);
    qemu_co_rwlock_unlock(&job->flush_rwlock);
    g_free(job->done_bitmap);

    bdrv_op_unblock_all(target, job->common.blocker);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&job->common, backup_complete, data);
}

void backup_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, MirrorSyncMode sync_mode,
                  BdrvDirtyBitmap *sync_bitmap,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  BlockJobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    BackupBlockJob *job = NULL;
    int ret;

    assert(bs);
    assert(target);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"incremental\" sync mode");
            return;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_run, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_lookup[sync_mode]);
        return;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    job = block_job_create(&backup_job_driver, bs, speed, cb, opaque, errp);
    if (!job) {
        goto error;
    }

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->target = target;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible. */
    ret = bdrv_get_info(job->target, &bdi);
    if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }

    bdrv_op_block_all(target, job->common.blocker);
    job->common.len = len;
    job->common.co = qemu_coroutine_create(backup_run);
    block_job_txn_add_job(txn, &job->common);
    qemu_coroutine_enter(job->common.co, job);
    return;

 error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
    if (job) {
        block_job_unref(&job->common);
    }
}
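
/*
 * Usage sketch (assumed caller, modelled on QMP drive-backup; the callback
 * name is illustrative, not defined in this file): a full backup without a
 * transaction would be started roughly as
 *
 *     Error *local_err = NULL;
 *     backup_start(bs, target_bs, 0, MIRROR_SYNC_MODE_FULL, NULL,
 *                  BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
 *                  block_job_cb, bs, NULL, &local_err);
 *
 * after which progress is reported through job->common.offset/len.
 */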