/*
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)

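/*
 * Tracks a single in-flight copy-on-write request so that overlapping
 * requests can wait for each other (see wait_for_overlapping_requests()).
 */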
typedef struct CowRequest {
    int64_t start_byte;
    int64_t end_byte;
    QLIST_ENTRY(CowRequest) list;
    CoQueue wait_queue; /* coroutines blocked on this request */
} CowRequest;

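/* Per-job state for a running backup block job. */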
typedef struct BackupBlockJob {
    BlockJob common;
    BlockBackend *target;

    BdrvDirtyBitmap *sync_bitmap;
    BdrvDirtyBitmap *copy_bitmap;

    MirrorSyncMode sync_mode;
    BitmapSyncMode bitmap_mode;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t len;
    uint64_t bytes_read;
    int64_t cluster_size;
    bool compress;
    NotifierWithReturn before_write;
    QLIST_HEAD(, CowRequest) inflight_reqs;

    bool use_copy_range;
    int64_t copy_range_size;

    bool serialize_target_writes;
} BackupBlockJob;

static const BlockJobDriver backup_job_driver;

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start_byte && start < req->end_byte) {
                qemu_co_queue_wait(&req->wait_queue, NULL);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start_byte = start;
    req->end_byte = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

/* Copy range to target with a bounce buffer and return the bytes copied. If
 * error occurred, return a negative error number */
static int coroutine_fn backup_cow_with_bounce_buffer(BackupBlockJob *job,
                                                      int64_t start,
                                                      int64_t end,
                                                      bool is_write_notifier,
                                                      bool *error_is_read,
                                                      void **bounce_buffer)
{
    int ret;
    BlockBackend *blk = job->common.blk;
    int nbytes;
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
    int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;

    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
    bdrv_reset_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
    nbytes = MIN(job->cluster_size, job->len - start);
    if (!*bounce_buffer) {
        *bounce_buffer = blk_blockalign(blk, job->cluster_size);
    }

    ret = blk_co_pread(blk, start, nbytes, *bounce_buffer, read_flags);
    if (ret < 0) {
        trace_backup_do_cow_read_fail(job, start, ret);
        if (error_is_read) {
            *error_is_read = true;
        }
        goto fail;
    }

    if (buffer_is_zero(*bounce_buffer, nbytes)) {
        ret = blk_co_pwrite_zeroes(job->target, start,
                                   nbytes, write_flags | BDRV_REQ_MAY_UNMAP);
    } else {
        ret = blk_co_pwrite(job->target, start,
                            nbytes, *bounce_buffer, write_flags |
                            (job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0));
    }
    if (ret < 0) {
        trace_backup_do_cow_write_fail(job, start, ret);
        if (error_is_read) {
            *error_is_read = false;
        }
        goto fail;
    }

    return nbytes;

fail:
    bdrv_set_dirty_bitmap(job->copy_bitmap, start, job->cluster_size);
    return ret;
}

/* Copy range to target and return the bytes copied. If error occurred, return a
 * negative error number. */
static int coroutine_fn backup_cow_with_offload(BackupBlockJob *job,
                                                int64_t start,
                                                int64_t end,
                                                bool is_write_notifier)
{
    int ret;
    int nr_clusters;
    BlockBackend *blk = job->common.blk;
    int nbytes;
    int read_flags = is_write_notifier ? BDRV_REQ_NO_SERIALISING : 0;
    int write_flags = job->serialize_target_writes ? BDRV_REQ_SERIALISING : 0;

    assert(QEMU_IS_ALIGNED(job->copy_range_size, job->cluster_size));
    assert(QEMU_IS_ALIGNED(start, job->cluster_size));
    nbytes = MIN(job->copy_range_size, end - start);
    nr_clusters = DIV_ROUND_UP(nbytes, job->cluster_size);
    bdrv_reset_dirty_bitmap(job->copy_bitmap, start,
                            job->cluster_size * nr_clusters);
    ret = blk_co_copy_range(blk, start, job->target, start, nbytes,
                            read_flags, write_flags);
    if (ret < 0) {
        trace_backup_do_cow_copy_range_fail(job, start, ret);
        bdrv_set_dirty_bitmap(job->copy_bitmap, start,
                              job->cluster_size * nr_clusters);
        return ret;
    }

    return nbytes;
}

/*
 * Check if the cluster starting at offset is allocated or not.
 * return via pnum the number of contiguous clusters sharing this allocation.
 */
static int backup_is_cluster_allocated(BackupBlockJob *s, int64_t offset,
                                       int64_t *pnum)
{
    BlockDriverState *bs = blk_bs(s->common.blk);
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        ret = bdrv_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}

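/*
 * Ensure the clusters covering [offset, offset + bytes) have been copied to
 * the target, skipping clusters that are already clean in copy_bitmap.
 * Returns a non-negative value on success and a negative errno on failure.
 */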
static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t offset, uint64_t bytes,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    CowRequest cow_request;
    int ret = 0;
    int64_t start, end; /* bytes */
    void *bounce_buffer = NULL;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);

    trace_backup_do_cow_enter(job, start, offset, bytes);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    while (start < end) {
        int64_t dirty_end;

        if (!bdrv_dirty_bitmap_get(job->copy_bitmap, start)) {
            trace_backup_do_cow_skip(job, start);
            start += job->cluster_size;
            continue; /* already copied */
        }

        dirty_end = bdrv_dirty_bitmap_next_zero(job->copy_bitmap, start,
                                                (end - start));
        if (dirty_end < 0) {
            dirty_end = end;
        }

        trace_backup_do_cow_process(job, start);

        if (job->use_copy_range) {
            ret = backup_cow_with_offload(job, start, dirty_end,
                                          is_write_notifier);
            if (ret < 0) {
                job->use_copy_range = false;
            }
        }
        if (!job->use_copy_range) {
            ret = backup_cow_with_bounce_buffer(job, start, dirty_end,
                                                is_write_notifier,
                                                error_is_read, &bounce_buffer);
        }
        if (ret < 0) {
            break;
        }

        /* Publish progress, guest I/O counts as progress too. Note that the
         * offset field is an opaque progress value, it is not a disk offset.
         */
        job->bytes_read += ret;
        job_progress_update(&job->common.job, ret);

        start += ret;
    }

    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, offset, bytes, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}

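/*
 * before_write notifier: copy out the original contents of the region a
 * guest write is about to touch, before the write reaches the source node.
 */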
static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BackupBlockJob *job = container_of(notifier, BackupBlockJob, before_write);
    BdrvTrackedRequest *req = opaque;

    assert(req->bs == blk_bs(job->common.blk));
    assert(QEMU_IS_ALIGNED(req->offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(req->bytes, BDRV_SECTOR_SIZE));

    return backup_do_cow(job, req->offset, req->bytes, NULL, true);
}

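/*
 * Reconcile the user's sync bitmap with its successor when the job finishes;
 * ret reflects whether the job succeeded (0) or failed (negative).
 */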
static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = blk_bs(job->common.blk);
    bool sync = (((ret == 0) || (job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS))
                 && (job->bitmap_mode != BITMAP_SYNC_MODE_NEVER));

    if (sync) {
        /*
         * We succeeded, or we always intended to sync the bitmap.
         * Delete this bitmap and install the child.
         */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
    } else {
        /*
         * We failed, or we never intended to sync the bitmap anyway.
         * Merge the successor back into the parent, keeping all data.
         */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
    }

    assert(bm);

    if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) {
        /* If we failed and synced, merge in the bits we didn't copy: */
        bdrv_dirty_bitmap_merge_internal(bm, job->copy_bitmap,
                                         NULL, true);
    }
}

static void backup_commit(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static void backup_clean(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    BlockDriverState *bs = blk_bs(s->common.blk);

    if (s->copy_bitmap) {
        bdrv_release_dirty_bitmap(bs, s->copy_bitmap);
        s->copy_bitmap = NULL;
    }

    blk_unref(s->target);
    s->target = NULL;
}

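/*
 * Re-arm copy_bitmap so that all clusters are copied out again on the next
 * guest write; only meaningful for jobs running with sync=none.
 */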
void backup_do_checkpoint(BlockJob *job, Error **errp)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);

    assert(block_job_driver(job) == &backup_job_driver);

    if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
        error_setg(errp, "The backup job only supports block checkpoint in"
                   " sync=none mode");
        return;
    }

    bdrv_set_dirty_bitmap(backup_job->copy_bitmap, 0, backup_job->len);
}

static void backup_drain(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of backup_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}

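/*
 * Throttle the job and yield to the main loop; returns true if the job has
 * been cancelled and the caller should stop.
 */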
static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    uint64_t delay_ns;

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    /* We need to yield even for delay_ns = 0 so that bdrv_drain_all() can
     * return. Without a yield, the VM would not reboot. */
    delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read);
    job->bytes_read = 0;
    job_sleep_ns(&job->common.job, delay_ns);

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    return false;
}

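/*
 * Main copy loop: walk the dirty clusters in copy_bitmap and copy each one,
 * retrying as long as the configured error action allows it.
 */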
static int coroutine_fn backup_loop(BackupBlockJob *job)
{
    bool error_is_read;
    int64_t offset;
    BdrvDirtyBitmapIter *bdbi;
    int ret = 0;
    int64_t dummy;

    bdbi = bdrv_dirty_iter_new(job->copy_bitmap);
    while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) {
        if (job->sync_mode == MIRROR_SYNC_MODE_TOP &&
            !backup_is_cluster_allocated(job, offset, &dummy))
        {
            bdrv_reset_dirty_bitmap(job->copy_bitmap, offset,
                                    job->cluster_size);
            continue;
        }

        do {
            if (yield_and_check(job)) {
                goto out;
            }
            ret = backup_do_cow(job, offset,
                                job->cluster_size, &error_is_read, false);
            if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
                           BLOCK_ERROR_ACTION_REPORT)
            {
                goto out;
            }
        } while (ret < 0);
    }

 out:
    bdrv_dirty_iter_free(bdbi);
    return ret;
}

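/*
 * Seed copy_bitmap: from the user's sync bitmap for sync=bitmap, otherwise
 * mark the whole device dirty; then publish the initial progress estimate.
 */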
static void backup_init_copy_bitmap(BackupBlockJob *job)
{
    bool ret;
    uint64_t estimate;

    if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
        ret = bdrv_dirty_bitmap_merge_internal(job->copy_bitmap,
                                               job->sync_bitmap,
                                               NULL, true);
        assert(ret);
    } else {
        bdrv_set_dirty_bitmap(job->copy_bitmap, 0, job->len);
    }

    estimate = bdrv_get_dirty_count(job->copy_bitmap);
    job_progress_set_remaining(&job->common.job, estimate);
}

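/* Entry point of the background job coroutine. */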
static int coroutine_fn backup_run(Job *job, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    BlockDriverState *bs = blk_bs(s->common.blk);
    int ret = 0;

    QLIST_INIT(&s->inflight_reqs);
    qemu_co_rwlock_init(&s->flush_rwlock);

    backup_init_copy_bitmap(s);

    s->before_write.notify = backup_before_write_notify;
    bdrv_add_before_write_notifier(bs, &s->before_write);

    if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
        /* All bits are set in copy_bitmap to allow any cluster to be copied.
         * This does not actually require them to be copied. */
        while (!job_is_cancelled(job)) {
            /* Yield until the job is cancelled. We just let our before_write
             * notify callback service CoW requests. */
            job_yield(job);
        }
    } else {
        ret = backup_loop(s);
    }

    notifier_with_return_remove(&s->before_write);

    /* wait until pending backup_do_cow() calls have completed */
    qemu_co_rwlock_wrlock(&s->flush_rwlock);
    qemu_co_rwlock_unlock(&s->flush_rwlock);

    return ret;
}

static const BlockJobDriver backup_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(BackupBlockJob),
        .job_type               = JOB_TYPE_BACKUP,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .run                    = backup_run,
        .commit                 = backup_commit,
        .abort                  = backup_abort,
        .clean                  = backup_clean,
    },
    .drain                      = backup_drain,
};

static int64_t backup_calculate_cluster_size(BlockDriverState *target,
                                             Error **errp)
{
    int ret;
    BlockDriverInfo bdi;

    /*
     * If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible.
     */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target->backing) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BACKUP_CLUSTER_SIZE_DEFAULT);
        return BACKUP_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        return ret;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        return BACKUP_CLUSTER_SIZE_DEFAULT;
    }

    return MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}

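/*
 * Create (but do not start) a backup block job; returns NULL and sets errp
 * on failure.
 */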
BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, int64_t speed,
                  MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                  BitmapSyncMode bitmap_mode,
                  bool compress,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  int creation_flags,
                  BlockCompletionFunc *cb, void *opaque,
                  JobTxn *txn, Error **errp)
{
    int64_t len;
    BackupBlockJob *job = NULL;
    int ret;
    int64_t cluster_size;
    BdrvDirtyBitmap *copy_bitmap = NULL;

    assert(bs);
    assert(target);

    /* QMP interface protects us from these cases */
    assert(sync_mode != MIRROR_SYNC_MODE_INCREMENTAL);
    assert(sync_bitmap || sync_mode != MIRROR_SYNC_MODE_BITMAP);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return NULL;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return NULL;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
        error_setg(errp, "Compression is not supported for this drive %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return NULL;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return NULL;
    }

    if (sync_mode == MIRROR_SYNC_MODE_BITMAP) {
        /* If we need to write to this bitmap, check that we can: */
        if (bitmap_mode != BITMAP_SYNC_MODE_NEVER &&
            bdrv_dirty_bitmap_check(sync_bitmap, BDRV_BITMAP_DEFAULT, errp)) {
            return NULL;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return NULL;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a bitmap was given to backup_job_create, "
                   "but it received an incompatible sync_mode (%s)",
                   MirrorSyncMode_str(sync_mode));
        return NULL;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    cluster_size = backup_calculate_cluster_size(target, errp);
    if (cluster_size < 0) {
        goto error;
    }

    copy_bitmap = bdrv_create_dirty_bitmap(bs, cluster_size, NULL, errp);
    if (!copy_bitmap) {
        goto error;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);

    /* job->len is fixed, so we can't allow resize */
    job = block_job_create(job_id, &backup_job_driver, txn, bs,
                           BLK_PERM_CONSISTENT_READ,
                           BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                           BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD,
                           speed, creation_flags, cb, opaque, errp);
    if (!job) {
        goto error;
    }

    /* The target must match the source in size, so no resize here either */
    job->target = blk_new(job->common.job.aio_context,
                          BLK_PERM_WRITE,
                          BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE |
                          BLK_PERM_WRITE_UNCHANGED | BLK_PERM_GRAPH_MOD);
    ret = blk_insert_bs(job->target, target, errp);
    if (ret < 0) {
        goto error;
    }
    blk_set_disable_request_queuing(job->target, true);

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_bitmap;
    job->bitmap_mode = bitmap_mode;
    job->compress = compress;

    /* Detect image-fleecing (and similar) schemes */
    job->serialize_target_writes = bdrv_chain_contains(target, bs);
    job->cluster_size = cluster_size;
    job->copy_bitmap = copy_bitmap;
    copy_bitmap = NULL;
    job->use_copy_range = !compress; /* compression isn't supported for it */
    job->copy_range_size = MIN_NON_ZERO(blk_get_max_transfer(job->common.blk),
                                        blk_get_max_transfer(job->target));
    job->copy_range_size = MAX(job->cluster_size,
                               QEMU_ALIGN_UP(job->copy_range_size,
                                             job->cluster_size));

    /* Required permissions are already taken with target's blk_new() */
    block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);
    job->len = len;

    return &job->common;

 error:
    if (copy_bitmap) {
        assert(!job || !job->copy_bitmap);
        bdrv_release_dirty_bitmap(bs, copy_bitmap);
    }
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
    if (job) {
        backup_clean(&job->common.job);
        job_early_fail(&job->common.job);
    }

    return NULL;
}