/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define SLICE_TIME 100000000ULL /* ns */
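
/*
 * How the job copies data: the source is copied to the target in units of
 * job->cluster_size, and finished clusters are tracked in job->done_bitmap.
 * Guest writes to the source are intercepted by a before-write notifier,
 * which copies the old contents of the affected clusters to the target
 * before the guest write is allowed to proceed (copy-before-write), so the
 * backup always reflects the source as it was when the job started.
 */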
typedef struct BackupBlockJob {
    BlockJob common;
    BlockBackend *target;
    /* bitmap for sync=incremental */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    RateLimit limit;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t sectors_read;
    unsigned long *done_bitmap;
    int64_t cluster_size;
    bool compress;
    NotifierWithReturn before_write;
    QLIST_HEAD(, CowRequest) inflight_reqs;
} BackupBlockJob;
/* Size of a cluster in sectors, instead of bytes. */
static inline int64_t cluster_size_sectors(BackupBlockJob *job)
{
    return job->cluster_size / BDRV_SECTOR_SIZE;
}
/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start && start < req->end) {
                qemu_co_queue_wait(&req->wait_queue, NULL);
                retry = true;
                break;
            }
        }
    } while (retry);
}
/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start = start;
    req->end = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}
/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}
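
/*
 * Copy the clusters covering [sector_num, sector_num + nb_sectors) from the
 * source to the target, skipping clusters already marked in done_bitmap.
 * Data is staged through a bounce buffer; all-zero clusters are written out
 * with blk_co_pwrite_zeroes() instead of a data write.
 */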
static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t sector_num, int nb_sectors,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    BlockBackend *blk = job->common.blk;
    CowRequest cow_request;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    void *bounce_buffer = NULL;
    int ret = 0;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int64_t bytes_per_cluster = sectors_per_cluster * BDRV_SECTOR_SIZE;
    int64_t start, end;
    int n;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);

    trace_backup_do_cow_enter(job, start * bytes_per_cluster,
                              sector_num * BDRV_SECTOR_SIZE,
                              nb_sectors * BDRV_SECTOR_SIZE);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    for (; start < end; start++) {
        if (test_bit(start, job->done_bitmap)) {
            trace_backup_do_cow_skip(job, start * bytes_per_cluster);
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start * bytes_per_cluster);

        n = MIN(sectors_per_cluster,
                job->common.len / BDRV_SECTOR_SIZE -
                start * sectors_per_cluster);

        if (!bounce_buffer) {
            bounce_buffer = blk_blockalign(blk, job->cluster_size);
        }
        iov.iov_base = bounce_buffer;
        iov.iov_len = n * BDRV_SECTOR_SIZE;
        qemu_iovec_init_external(&bounce_qiov, &iov, 1);

        ret = blk_co_preadv(blk, start * job->cluster_size,
                            bounce_qiov.size, &bounce_qiov,
                            is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0);
        if (ret < 0) {
            trace_backup_do_cow_read_fail(job, start * bytes_per_cluster, ret);
            if (error_is_read) {
                *error_is_read = true;
            }
            goto out;
        }

        if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
            ret = blk_co_pwrite_zeroes(job->target, start * job->cluster_size,
                                       bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
        } else {
            ret = blk_co_pwritev(job->target, start * job->cluster_size,
                                 bounce_qiov.size, &bounce_qiov,
                                 job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
        }
        if (ret < 0) {
            trace_backup_do_cow_write_fail(job, start * bytes_per_cluster, ret);
            if (error_is_read) {
                *error_is_read = false;
            }
            goto out;
        }

        set_bit(start, job->done_bitmap);

        /* Publish progress, guest I/O counts as progress too.  Note that the
         * offset field is an opaque progress value, it is not a disk offset.
         */
        job->sectors_read += n;
        job->common.offset += n * BDRV_SECTOR_SIZE;
    }

out:
    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, sector_num * BDRV_SECTOR_SIZE,
                               nb_sectors * BDRV_SECTOR_SIZE, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}
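
/*
 * Before-write notifier, called from the guest write path: copy the old
 * contents of the written range to the target before the write is allowed
 * to modify the source.
 */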
static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BackupBlockJob *job = container_of(notifier, BackupBlockJob, before_write);
    BdrvTrackedRequest *req = opaque;
    int64_t sector_num = req->offset >> BDRV_SECTOR_BITS;
    int nb_sectors = req->bytes >> BDRV_SECTOR_BITS;

    assert(req->bs == blk_bs(job->common.blk));
    assert((req->offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((req->bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    return backup_do_cow(job, sector_num, nb_sectors, NULL, true);
}
static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
}
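
/*
 * For sync=incremental the caller's dirty bitmap is given a successor for
 * the duration of the job (see backup_job_create below).  On completion the
 * successor either replaces the frozen bitmap (success) or is merged back
 * into it (failure/cancel), so no dirty information is lost either way.
 */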
static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = blk_bs(job->common.blk);

    if (ret < 0 || block_job_is_cancelled(&job->common)) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine, delete this bitmap and install the backup. */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
        assert(bm);
    }
}
static void backup_commit(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}
static void backup_abort(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}
static void backup_clean(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    assert(s->target);
    blk_unref(s->target);
    s->target = NULL;
}
static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    blk_set_aio_context(s->target, aio_context);
}
void backup_do_checkpoint(BlockJob *job, Error **errp)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t len;

    assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);

    if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
        error_setg(errp, "The backup job only supports block checkpoint in"
                   " sync=none mode");
        return;
    }

    len = DIV_ROUND_UP(backup_job->common.len, backup_job->cluster_size);
    bitmap_zero(backup_job->done_bitmap, len);
}
void backup_wait_for_overlapping_requests(BlockJob *job, int64_t sector_num,
                                          int nb_sectors)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t sectors_per_cluster = cluster_size_sectors(backup_job);
    int64_t start, end;

    assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
    wait_for_overlapping_requests(backup_job, start, end);
}
void backup_cow_request_begin(CowRequest *req, BlockJob *job,
                              int64_t sector_num,
                              int nb_sectors)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
    int64_t sectors_per_cluster = cluster_size_sectors(backup_job);
    int64_t start, end;

    assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
    cow_request_begin(req, backup_job, start, end);
}
void backup_cow_request_end(CowRequest *req)
{
    cow_request_end(req);
}
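
/*
 * The backup_* wrappers above are declared in block/block_backup.h so that
 * code outside this file (the replication driver, for instance) can
 * serialise its own requests against a running backup job's CoW requests.
 */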
static void backup_drain(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of backup_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}
static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}
typedef struct {
    int ret;
} BackupCompleteData;

static void backup_complete(BlockJob *job, void *opaque)
{
    BackupCompleteData *data = opaque;

    block_job_completed(job, data->ret);
    g_free(data);
}
static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    /* We need to yield so that bdrv_drain_all() returns
     * (without this, the VM does not reboot).
     */
    if (job->common.speed) {
        uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
                                                      job->sectors_read);
        job->sectors_read = 0;
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
    } else {
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
    }

    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    return false;
}
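
/*
 * sync=incremental: walk the dirty bitmap and copy only the clusters it
 * marks as dirty.  Failed clusters are retried or abort the job, depending
 * on the configured error action.
 */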
static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    bool error_is_read;
    int ret = 0;
    int clusters_per_iter;
    uint32_t granularity;
    int64_t sector;
    int64_t cluster;
    int64_t end;
    int64_t last_cluster = -1;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    BdrvDirtyBitmapIter *dbi;

    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
    dbi = bdrv_dirty_iter_new(job->sync_bitmap, 0);

    /* Find the next dirty sector(s) */
    while ((sector = bdrv_dirty_iter_next(dbi)) != -1) {
        cluster = sector / sectors_per_cluster;

        /* Fake progress updates for any clusters we skipped */
        if (cluster != last_cluster + 1) {
            job->common.offset += ((cluster - last_cluster - 1) *
                                   job->cluster_size);
        }

        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
            do {
                if (yield_and_check(job)) {
                    goto out;
                }
                ret = backup_do_cow(job, cluster * sectors_per_cluster,
                                    sectors_per_cluster, &error_is_read,
                                    false);
                if ((ret < 0) &&
                    backup_error_action(job, error_is_read, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto out;
                }
            } while (ret < 0);
        }

        /* If the bitmap granularity is smaller than the backup granularity,
         * we need to advance the iterator pointer to the next cluster. */
        if (granularity < job->cluster_size) {
            bdrv_set_dirty_iter(dbi, cluster * sectors_per_cluster);
        }

        last_cluster = cluster - 1;
    }

    /* Play some final catchup with the progress meter */
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
    if (last_cluster + 1 < end) {
        job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
    }

out:
    bdrv_dirty_iter_free(dbi);
    return ret;
}
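
/*
 * Main job coroutine.  Installs the before-write notifier, then dispatches
 * on the sync mode: NONE only services guest-write CoW, INCREMENTAL follows
 * the dirty bitmap, and TOP/FULL scan the whole device (TOP skipping
 * clusters that are unallocated in the top image).
 */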
static void coroutine_fn backup_run(void *opaque)
{
    BackupBlockJob *job = opaque;
    BackupCompleteData *data;
    BlockDriverState *bs = blk_bs(job->common.blk);
    int64_t start, end;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int ret = 0;

    QLIST_INIT(&job->inflight_reqs);
    qemu_co_rwlock_init(&job->flush_rwlock);

    start = 0;
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);

    job->done_bitmap = bitmap_new(end);

    job->before_write.notify = backup_before_write_notify;
    bdrv_add_before_write_notifier(bs, &job->before_write);

    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
        while (!block_job_is_cancelled(&job->common)) {
            /* Yield until the job is cancelled.  We just let our before_write
             * notify callback service CoW requests. */
            block_job_yield(&job->common);
        }
    } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        ret = backup_run_incremental(job);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (; start < end; start++) {
            bool error_is_read;
            if (yield_and_check(job)) {
                break;
            }

            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
                int i, n;
                int alloced = 0;

                /* Check to see if these blocks are already in the
                 * backing file. */
                for (i = 0; i < sectors_per_cluster;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
                     * For that reason we must verify each sector in the
                     * backup cluster length.  We end up copying more than
                     * needed but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs,
                                          start * sectors_per_cluster + i,
                                          sectors_per_cluster - i, &n);
                    i += n;

                    if (alloced || n == 0) {
                        break;
                    }
                }

                /* If the above loop never found any sectors that are in
                 * the topmost image, skip this backup. */
                if (alloced == 0) {
                    continue;
                }
            }
            /* FULL sync mode: we copy the whole drive. */
            ret = backup_do_cow(job, start * sectors_per_cluster,
                                sectors_per_cluster, &error_is_read, false);
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(job, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    start--;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&job->before_write);

    /* Wait until pending backup_do_cow() calls have completed. */
    qemu_co_rwlock_wrlock(&job->flush_rwlock);
    qemu_co_rwlock_unlock(&job->flush_rwlock);
    g_free(job->done_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&job->common, backup_complete, data);
}
static const BlockJobDriver backup_job_driver = {
    .instance_size          = sizeof(BackupBlockJob),
    .job_type               = BLOCK_JOB_TYPE_BACKUP,
    .start                  = backup_run,
    .set_speed              = backup_set_speed,
    .commit                 = backup_commit,
    .abort                  = backup_abort,
    .clean                  = backup_clean,
    .attached_aio_context   = backup_attached_aio_context,
    .drain                  = backup_drain,
};
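
/*
 * Create (but do not start) a backup job.  Roughly how a caller such as the
 * QMP drive-backup handler might wire this up (an illustrative sketch only,
 * not the actual call site; the real callers live in blockdev.c):
 *
 *     BlockJob *job;
 *     job = backup_job_create(NULL, bs, target_bs, 0,
 *                             MIRROR_SYNC_MODE_FULL, NULL, false,
 *                             BLOCKDEV_ON_ERROR_REPORT,
 *                             BLOCKDEV_ON_ERROR_REPORT,
 *                             BLOCK_JOB_DEFAULT, NULL, NULL, txn, errp);
 *     if (job) {
 *         block_job_start(job);
 *     }
 */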
BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, int64_t speed,
                  MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                  bool compress,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  int creation_flags,
                  BlockCompletionFunc *cb, void *opaque,
                  BlockJobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    BackupBlockJob *job = NULL;
    int ret;

    assert(bs);
    assert(target);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return NULL;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return NULL;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
        error_setg(errp, "Compression is not supported for this drive %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return NULL;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return NULL;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"incremental\" sync mode");
            return NULL;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return NULL;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_run, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_lookup[sync_mode]);
        return NULL;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    /* job->common.len is fixed, so we can't allow resize */
    job = block_job_create(job_id, &backup_job_driver, bs,
                           BLK_PERM_CONSISTENT_READ,
                           BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                           BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD,
                           speed, creation_flags, cb, opaque, errp);
    if (!job) {
        goto error;
    }

    /* The target must match the source in size, so no resize here either */
    job->target = blk_new(BLK_PERM_WRITE,
                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                          BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
    ret = blk_insert_bs(job->target, target, errp);
    if (ret < 0) {
        goto error;
    }

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;
    job->compress = compress;

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible. */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target->backing) {
        /* Cluster size is not defined */
        error_report("WARNING: The target block device doesn't provide "
                     "information about the block size and it doesn't have a "
                     "backing file. The default block size of %u bytes is "
                     "used. If the actual block size of the target exceeds "
                     "this default, the backup may be unusable",
                     BACKUP_CLUSTER_SIZE_DEFAULT);
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }

    /* Required permissions are already taken with target's blk_new() */
    block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);
    job->common.len = len;
    block_job_txn_add_job(txn, &job->common);

    return &job->common;

 error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
    if (job) {
        backup_clean(&job->common);
        block_job_early_fail(&job->common);
    }

    return NULL;
}