/*
 * Copyright Red Hat, Inc. 2012
 *
 * Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
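/* With the defaults above, the job keeps at most MAX_IN_FLIGHT (16)
 * operations pending and buffers at most 16 * 1 MiB = 16 MiB of data. */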
/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *source;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;
} MirrorOp;
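/* Map an I/O error to a BlockErrorAction according to the configured
 * on-source-error (reads) or on-target-error (writes) policy; a returned
 * BLOCK_ERROR_ACTION_REPORT makes the callers fail the whole job. */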
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}
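/* Common completion path for all mirror operations: return the buffer
 * chunks to the free list, clear the in-flight bitmap for the range,
 * account progress, and wake the job coroutine if it is waiting in
 * mirror_wait_for_io(). */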
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            block_job_progress_update(&s->common, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co);
    }
}
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        blk_aio_pwritev(s->target, op->offset, &op->qiov,
                        0, mirror_write_complete, op);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}
/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}
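/* For instance, assuming 4 KiB granularity and 64 KiB target clusters, a
 * request touching a chunk whose cluster has not been copied yet (its
 * cow_bitmap bit is clear) is widened by mirror_cow_align() below to the
 * whole 64 KiB cluster, then capped by the iovec and end-of-file limits. */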
/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret <= 0);
    return ret;
}
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}
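/* The helper above pairs with mirror_iteration_done(): the coroutine yields
 * here and is re-entered from the completion callback as soon as one of the
 * in-flight operations finishes. */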
/* Submit async read while handling COW.
 * Returns: The number of bytes copied after and including offset,
 *          excluding any bytes copied prior to offset due to alignment.
 *          This will be @bytes if no alignment is necessary, or
 *          (new_end - offset) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static uint64_t mirror_do_read(MirrorBlockJob *s, int64_t offset,
                               uint64_t bytes)
{
    BlockBackend *source = s->common.blk;
    int nb_chunks;
    uint64_t ret;
    MirrorOp *op;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    bytes = MIN(s->buf_size, MIN(max_bytes, bytes));
    assert(bytes);
    assert(bytes < BDRV_REQUEST_MAX_BYTES);
    ret = bytes;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &offset, &bytes);
    }
    assert(bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += bytes;
    trace_mirror_one_iteration(s, offset, bytes);

    blk_aio_preadv(source, offset, &op->qiov, 0, mirror_read_complete, op);
    return ret;
}
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t offset,
                                      uint64_t bytes,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    s->in_flight++;
    s->bytes_in_flight += bytes;
    if (is_discard) {
        blk_aio_pdiscard(s->target, offset,
                         op->bytes, mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, offset,
                              op->bytes, s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}
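/* One pass of the copy loop: pick the next dirty extent from the bitmap
 * iterator, extend it over consecutive dirty chunks up to buf_size, choose
 * copy, write-zeroes or discard according to the block status, submit the
 * I/O and return the delay requested by the rate limiter. */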
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t offset, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    first_chunk = offset / s->granularity;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_bytes = io_bytes_acct = mirror_do_read(s, offset, io_bytes);
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, offset, io_bytes,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_bytes_acct = 0;
            } else {
                io_bytes_acct = io_bytes;
            }
            break;
        default:
            abort();
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }
    return delay_ns;
}
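/* Carve the s->buf allocation into granularity-sized MirrorBuffer chunks
 * and thread them all onto the buf_free list. */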
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}
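/* mirror_exit() runs in the main loop after mirror_run() has deferred to it
 * via block_job_defer_to_main_loop(); it performs the graph switch to the
 * target and removes the mirror_top filter node. */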
typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away before we called
     * block_job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(job);
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(job->blk);
    blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(job->blk, mirror_top_bs, &error_abort);

    block_job_completed(&s->common, data->ret);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}
static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        block_job_sleep_ns(&s->common, 0);
    } else {
        block_job_pause_point(&s->common);
    }
}
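/* Prepare the dirty bitmap for the initial bulk phase: for a full sync onto
 * a target without guaranteed zero initialization, either mark the whole
 * device dirty or pre-zero the target, so that afterwards only allocated
 * data needs to be copied. */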
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (block_job_is_cancelled(&s->common)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_do_zero_or_discard(s, offset, bytes, false);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap.  */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (block_job_is_cancelled(&s->common)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}
/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}
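/* The main mirror coroutine: set everything up, run the initial bulk copy
 * via mirror_dirty_init(), then loop over mirror_iteration() until source
 * and target converge; BLOCK_JOB_READY is emitted the first time the dirty
 * bitmap is clean and all in-flight requests have completed. */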
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF,
                               NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || block_job_is_cancelled(&s->common)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        block_job_pause_point(&s->common);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        block_job_progress_set_remaining(&s->common, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                block_job_event_ready(&s->common);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                block_job_is_cancelled(&s->common);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;

        if (s->synced && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        block_job_sleep_ns(&s->common, delay_ns);
        if (block_job_is_cancelled(&s->common) &&
            (!s->synced || s->common.force))
        {
            break;
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || ((s->common.force || !s->synced) &&
               block_job_is_cancelled(&s->common)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}
static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}
static void mirror_pause(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    mirror_wait_for_all_io(s);
}
static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}
static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}
static const BlockJobDriver mirror_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_MIRROR,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_COMMIT,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};
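/* The bdrv_mirror_top_* callbacks below implement the filter node that the
 * job inserts above the source: they forward all I/O unmodified to
 * bs->backing, giving the job a stable attachment point in the graph. */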
static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_co_pdiscard(bs->backing->bs, offset, bytes);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    bdrv_refresh_filename(bs->backing->bs);
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_close(BlockDriverState *bs)
{
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}
1073 * from its backing file and that allows writes on the backing file chain. */
1074 static BlockDriver bdrv_mirror_top
= {
1075 .format_name
= "mirror_top",
1076 .bdrv_co_preadv
= bdrv_mirror_top_preadv
,
1077 .bdrv_co_pwritev
= bdrv_mirror_top_pwritev
,
1078 .bdrv_co_pwrite_zeroes
= bdrv_mirror_top_pwrite_zeroes
,
1079 .bdrv_co_pdiscard
= bdrv_mirror_top_pdiscard
,
1080 .bdrv_co_flush
= bdrv_mirror_top_flush
,
1081 .bdrv_co_block_status
= bdrv_co_block_status_from_backing
,
1082 .bdrv_refresh_filename
= bdrv_mirror_top_refresh_filename
,
1083 .bdrv_close
= bdrv_mirror_top_close
,
1084 .bdrv_child_perm
= bdrv_mirror_top_child_perm
,
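/* Common setup for mirror and active commit: insert the mirror_top filter
 * above @bs, create the job, open the target BlockBackend with appropriate
 * permissions, and start the job coroutine. */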
static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror,
                             Error **errp)
{
    MirrorBlockJob *s;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }
    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED;
    bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() succeeds even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->source = bs;
    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there.*/
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be a NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB.  When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->unmap = unmap;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other options would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    trace_mirror_start(bs, s, opaque);
    block_job_start(&s->common);
    return;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        block_job_early_fail(&s->common);
    }

    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);
}
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, errp);
}
void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque,
                         bool auto_complete, Error **errp)
{
    int orig_base_flags;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}