 * Copyright Red Hat, Inc. 2012
 * Paolo Bonzini <pbonzini@redhat.com>
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"

#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/dirty-bitmap.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#include "qemu/memalign.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 Mb */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
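/* With the defaults above, the background copy buffer is 16 * 1 MiB = 16 MiB. */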
/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;
    BlockDriverState *base_overlay;

    /* The name of the graph node to replace */
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    /*
     * To be accessed with atomics. Written only under the BQL (required by the
     * current implementation of mirror_change()).
     */
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    /*
     * To be accessed with atomics.
     *
     * Set when the target is synced (dirty bitmap is clean, nothing in flight)
     * and the job is running in active mode.
     */
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int target_cluster_size;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    int64_t active_write_bytes_in_flight;

typedef struct MirrorBDSOpaque {

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_active_write;
    CoQueue waiting_requests;
    MirrorOp *waiting_for_op;
    QTAILQ_ENTRY(MirrorOp) next;

typedef enum MirrorMethod {
    MIRROR_METHOD_DISCARD,
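/* Each background extent is handled with one of these methods; mirror_iteration()
 * picks COPY, ZERO or DISCARD based on the block status it sees for the source. */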
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,

        qatomic_set(&s->actively_synced, false);

        return block_job_error_action(&s->common, s->on_source_error,

        return block_job_error_action(&s->common, s->on_target_error,
static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,

    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))

                /*
                 * If the operation is already (indirectly) waiting for us,
                 * or will wait for us as soon as it wakes up, then just go
                 * on (instead of producing a deadlock in the former case).
                 */
                if (op->waiting_for_op) {

                self->waiting_for_op = op;

                qemu_co_queue_wait(&op->waiting_requests, NULL);

                self->waiting_for_op = NULL;
static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
    MirrorBlockJob *s = op->s;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->bytes_in_flight -= op->bytes;

    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);

        bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);

    if (!s->initial_zeroing_ongoing) {
        job_progress_update(&s->common.job, op->bytes);

    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
    MirrorBlockJob *s = op->s;

        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {

    mirror_iteration_done(op, ret);

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
    MirrorBlockJob *s = op->s;

        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {

        mirror_iteration_done(op, ret);

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,

    return MIN(bytes, s->bdev_length - offset);

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int coroutine_fn mirror_cow_align(MirrorBlockJob *s, int64_t *offset,

    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,

        bdrv_round_to_subclusters(blk_bs(s->target), *offset, *bytes,
                                  &align_offset, &align_bytes);

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);

    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
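/*
 * Worked example (illustrative values, not from this file): with a 64 KiB
 * granularity and a 1 MiB target cluster, a request for the chunk at 64 KiB
 * that still needs COW is widened to the surrounding [0, 1 MiB) cluster, and
 * the returned delta lets the caller account for the extra tail bytes.
 */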
static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /*
         * Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function. Since there is only one pseudo op
         * at any given time, we will always find some real operation
         *
         * Also, do not wait on active operations, because they do not
         * use up in-flight slots.
         */
        if (!op->is_pseudo_op && op->is_in_flight && !op->is_active_write) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment. This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));

    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);

    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);

    /* Now make a QEMUIOVector taking enough granularity-sized chunks */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);

        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));

    /* Copy the dirty cluster. */
    s->bytes_in_flight += op->bytes;
    op->is_in_flight = true;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    WITH_GRAPH_RDLOCK_GUARD() {
        ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,

    mirror_read_complete(op, ret);
static void coroutine_fn mirror_co_zero(void *opaque)
    MirrorOp *op = opaque;

    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);

static void coroutine_fn mirror_co_discard(void *opaque)
    MirrorOp *op = opaque;

    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)

    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
        .bytes_handled  = &bytes_handled,
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_zero()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
static void coroutine_fn mirror_iteration(MirrorBlockJob *s)
    BlockDriverState *source = s->mirror_top_bs->backing->bs;

    /* At least the first dirty chunk is mirrored in one iteration. */

    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /*
     * Wait for concurrent requests to @offset. The next loop will limit the
     * copied area based on in_flight_bitmap so we only copy an area that does
     * not overlap with concurrent in-flight requests. Still, we would like to
     * copy something, so wait until there are at least no more requests to the
     * very beginning of the area.
     */
    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
        if (test_bit(next_chunk, s->in_flight_bitmap)) {

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        assert(next_dirty == next_offset);

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it. mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        WITH_GRAPH_RDLOCK_GUARD() {
            ret = bdrv_co_block_status_above(source, NULL, offset,
                                             nb_chunks * s->granularity,
                                             &io_bytes, NULL, NULL);

            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            WITH_GRAPH_RDLOCK_GUARD() {
                bdrv_round_to_subclusters(blk_bs(s->target), offset, io_bytes,
                                          &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_DISCARD;

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = io_bytes;

        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        block_job_ratelimit_processed_bytes(&s->common, io_bytes_acct);

    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
static void mirror_free_init(MirrorBlockJob *s)
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        buf_size -= granularity;
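/* E.g. with the default 16 MiB buffer and a 64 KiB granularity this carves the
 * buffer into 256 free chunks. */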
/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
/*
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;

    aio_context_acquire(qemu_get_aio_context());
    bdrv_graph_rdlock_main_loop();

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(mirror_top_bs);

    bdrv_graph_rdunlock_main_loop();

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     */
    blk_unref(s->target);

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bdrv_drained_begin(target_bs);
    bs_opaque->stop = true;

    bdrv_graph_rdlock_main_loop();
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,

    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);

        if (bdrv_cow_bs(unfiltered_target) != backing) {
            bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
                error_report_err(local_err);
    } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        assert(!bdrv_backing_chain_next(target_bs));
        ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL,
                                     "backing", &local_err);
            error_report_err(local_err);
    bdrv_graph_rdunlock_main_loop();

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(to_replace);
        /*
         * Cannot use check_to_replace_node() here, because that would
         * check for an op blocker on @to_replace, and we have our own
         */
        bdrv_graph_wrlock(target_bs);
        if (bdrv_recurse_can_replace(src, to_replace)) {
            bdrv_replace_node(to_replace, target_bs, &local_err);
            error_setg(&local_err, "Can no longer replace '%s' by '%s', "
                       "because it can no longer be guaranteed that doing so "
                       "would not lead to an abrupt change of visible data",
                       to_replace->node_name, target_bs->node_name);
        bdrv_graph_wrunlock();
        bdrv_drained_end(to_replace);
            error_report_err(local_err);

        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_graph_wrlock(mirror_top_bs);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
    bdrv_graph_wrunlock();

    bdrv_drained_end(target_bs);
    bdrv_unref(target_bs);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    bdrv_unref(mirror_top_bs);

    aio_context_release(qemu_get_aio_context());
static int mirror_prepare(Job *job)
    return mirror_exit_common(job);

static void mirror_abort(Job *job)
    int ret = mirror_exit_common(job);
static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
        job_pause_point(&s->common.job);
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                mirror_wait_for_free_in_flight_slot(s);

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;

    /* First part, loop on the sectors and initialize the dirty bitmap. */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        if (job_is_cancelled(&s->common.job)) {

        WITH_GRAPH_RDLOCK_GUARD() {
            ret = bdrv_co_is_allocated_above(bs, s->base_overlay, true, offset,

            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int coroutine_fn mirror_flush(MirrorBlockJob *s)
    int ret = blk_co_flush(s->target);
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
static int coroutine_fn mirror_run(Job *job, Error **errp)
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    BlockDeviceIoStatus iostatus;
    int64_t target_length;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */

    if (job_is_cancelled(&s->common.job)) {

    bdrv_graph_co_rdlock();
    s->bdev_length = bdrv_co_getlength(bs);
    bdrv_graph_co_rdunlock();

    if (s->bdev_length < 0) {
        ret = s->bdev_length;

    target_length = blk_co_getlength(s->target);
    if (target_length < 0) {

    /* Active commit must resize the base image if its size differs from the */
    if (s->base == blk_bs(s->target)) {
        if (s->bdev_length > target_length) {
            ret = blk_co_truncate(s->target, s->bdev_length, false,
                                  PREALLOC_MODE_OFF, 0, NULL);
    } else if (s->bdev_length != target_length) {
        error_setg(errp, "Source and target image have different sizes");

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        qatomic_set(&s->actively_synced, true);
        while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW. Instead, we copy sectors around the
     * dirty data if needed. We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    bdrv_graph_co_rdlock();
    if (!bdrv_co_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);
    bdrv_graph_co_rdunlock();

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        goto immediate_exit;

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;

    /*
     * Only now the job is fully initialised and mirror_top_bs should start
     */
    mirror_top_opaque->job = s;

    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
        bool should_complete;

            goto immediate_exit;

        job_pause_point(&s->common.job);

        if (job_is_cancelled(&s->common.job)) {
            goto immediate_exit;

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job,
                                   s->bytes_in_flight + cnt +
                                   s->active_write_bytes_in_flight);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        WITH_JOB_LOCK_GUARD() {
            iostatus = s->common.iostatus;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
            } else if (cnt != 0) {
                mirror_iteration(s);

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!job_is_ready(&s->common.job)) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                if (qatomic_read(&s->copy_mode) != MIRROR_COPY_MODE_BACKGROUND) {
                    qatomic_set(&s->actively_synced, true);

            should_complete = s->should_complete ||
                job_cancel_requested(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now. Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);

            /* Must be zero because we are drained */
            assert(s->in_active_write_counter == 0);

            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;

            /* The two disks are in sync. Exit and report successful */
            assert(QLIST_EMPTY(&bs->tracked_requests));

        if (job_is_ready(&s->common.job) && !should_complete) {
            if (s->in_flight == 0 && cnt == 0) {
                trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
                                          BLOCK_JOB_SLICE_TIME);
                job_sleep_ns(&s->common.job, BLOCK_JOB_SLICE_TIME);
            block_job_ratelimit_sleep(&s->common);
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (s->in_flight > 0) {
        /* We get here only if something went wrong. Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || job_is_cancelled(&s->common.job));
        mirror_wait_for_all_io(s);

    assert(s->in_flight == 0);

    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

        bdrv_drained_begin(bs);
static void mirror_complete(Job *job, Error **errp)
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    if (!job_is_ready(job)) {
        error_setg(errp, "The active block job '%s' cannot be completed",

    /* block all operations on to_replace bs */
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into child freeze system. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);

    s->should_complete = true;

    /* If the job is paused, it will be re-entered when it is resumed */
    WITH_JOB_LOCK_GUARD() {
            job_enter_cond_locked(job, NULL);
static void coroutine_fn mirror_pause(Job *job)
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);

static bool mirror_drained_poll(BlockJob *job)
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job isn't paused nor cancelled, we can't be sure that it won't
     * issue more requests. We make an exception if we've reached this point
     * from one of our own drain sections, to avoid a deadlock waiting for
     */
    WITH_JOB_LOCK_GUARD() {
        if (!s->common.job.paused && !job_is_cancelled_locked(&job->job)

    return !!s->in_flight;
static bool mirror_cancel(Job *job, bool force)
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target = blk_bs(s->target);

    /*
     * Before the job is READY, we treat any cancellation like a
     * force-cancellation.
     */
    force = force || !job_is_ready(job);

        bdrv_cancel_in_flight(target);

static bool commit_active_cancel(Job *job, bool force)
    /* Same as above in mirror_cancel() */
    return force || !job_is_ready(job);
static void mirror_change(BlockJob *job, BlockJobChangeOptions *opts,
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockJobChangeOptionsMirror *change_opts = &opts->u.mirror;
    MirrorCopyMode current;

    /*
     * The implementation relies on the fact that copy_mode is only written
     * under the BQL. Otherwise, further synchronization would be required.
     */
    GLOBAL_STATE_CODE();

    if (qatomic_read(&s->copy_mode) == change_opts->copy_mode) {

    if (change_opts->copy_mode != MIRROR_COPY_MODE_WRITE_BLOCKING) {
        error_setg(errp, "Change to copy mode '%s' is not implemented",
                   MirrorCopyMode_str(change_opts->copy_mode));

    current = qatomic_cmpxchg(&s->copy_mode, MIRROR_COPY_MODE_BACKGROUND,
                              change_opts->copy_mode);
    if (current != MIRROR_COPY_MODE_BACKGROUND) {
        error_setg(errp, "Expected current copy mode '%s', got '%s'",
                   MirrorCopyMode_str(MIRROR_COPY_MODE_BACKGROUND),
                   MirrorCopyMode_str(current));
static void mirror_query(BlockJob *job, BlockJobInfo *info)
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    info->u.mirror = (BlockJobInfoMirror) {
        .actively_synced = qatomic_read(&s->actively_synced),
static const BlockJobDriver mirror_job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = mirror_cancel,
    .drained_poll           = mirror_drained_poll,
    .change                 = mirror_change,
    .query                  = mirror_query,

static const BlockJobDriver commit_active_job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = commit_active_cancel,
    .drained_poll           = mirror_drained_poll,
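/* The active-commit variant shares most callbacks with the mirror job driver;
 * it differs in the job type and in using commit_active_cancel(). */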
static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
    size_t qiov_offset = 0;
    int64_t bitmap_offset, bitmap_end;

    if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
        /*
         * Dirty unaligned padding: ignore it.
         *
         * 1. If we copy it, we can't reset corresponding bit in
         * dirty_bitmap as there may be some "dirty" bytes still not
         * 2. It's already dirty, so skipping it we don't diverge mirror
         *
         * Note that, because of this, a guest write may not contribute to
         * mirror convergence, but that's not bad, as we have a background
         * mirroring process. If under some bad circumstances (high guest
         * I/O load) the background process starves, we will not converge
         * anyway, even if each write contributed, as the guest is not
         * guaranteed to rewrite the whole disk.
         */
        qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
        if (bytes <= qiov_offset) {
            /* nothing to do after shrink */
        offset += qiov_offset;
        bytes -= qiov_offset;

    if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
        uint64_t tail = (offset + bytes) % job->granularity;

        if (bytes <= tail) {
            /* nothing to do after shrink */

    /*
     * Tails are either clean or shrunk, so for bitmap resetting
     * we safely align the range down.
     */
    bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
    bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
    if (bitmap_offset < bitmap_end) {
        bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                                bitmap_end - bitmap_offset);

    job_progress_increase_remaining(&job->common.job, bytes);
    job->active_write_bytes_in_flight += bytes;

    case MIRROR_METHOD_COPY:
        ret = blk_co_pwritev_part(job->target, offset, bytes,
                                  qiov, qiov_offset, flags);
    case MIRROR_METHOD_ZERO:
        ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
    case MIRROR_METHOD_DISCARD:
        ret = blk_co_pdiscard(job->target, offset, bytes);

    job->active_write_bytes_in_flight -= bytes;
        job_progress_update(&job->common.job, bytes);
        BlockErrorAction action;

        /*
         * We failed, so we should mark dirty the whole area, aligned up.
         * Note that we don't care about shrunk tails if any: they were dirty
         * at function start, and they must be still dirty, as we've locked
         * the region for in-flight op.
         */
        bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
        bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
        bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                              bitmap_end - bitmap_offset);
        qatomic_set(&job->actively_synced, false);

        action = mirror_error_action(job, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT) {
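/*
 * Illustrative example (values not from this file): with a 64 KiB granularity,
 * a guest write covering [48 KiB, 160 KiB) whose unaligned head and tail fall
 * on still-dirty chunks is shrunk to the aligned [64 KiB, 128 KiB) before
 * being mirrored, leaving the padding to the background copy.
 */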
static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,

    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
        .is_active_write        = true,
        .is_in_flight           = true,
        .co                     = qemu_coroutine_self(),
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    /*
     * Wait for concurrent requests affecting the area. If there are already
     * running requests that are copying off now-to-be stale data in the area,
     * we must wait for them to finish before we begin writing fresh data to the
     * target so that the write operations appear in the correct order.
     * Note that background requests (see mirror_iteration()) in contrast only
     * wait for conflicting requests at the start of the dirty area, and then
     * (based on the in_flight_bitmap) truncate the area to copy so it will not
     * conflict with any requests beyond that. For active writes, however, we
     * cannot truncate that area. The request from our parent must be blocked
     * until the area is copied in full. Therefore, we must wait for the whole
     * area to become free of concurrent requests.
     */
    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
static void coroutine_fn GRAPH_RDLOCK active_write_settle(MirrorOp *op)
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter &&
        qatomic_read(&op->s->actively_synced)) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));

    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                       QEMUIOVector *qiov, BdrvRequestFlags flags)
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);

static bool should_copy_to_target(MirrorBDSOpaque *s)
    return s->job && s->job->ret >= 0 &&
           !job_is_cancelled(&s->job->common.job) &&
           qatomic_read(&s->job->copy_mode) == MIRROR_COPY_MODE_WRITE_BLOCKING;
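/* Guest writes are mirrored synchronously only while the job exists, has not
 * failed or been cancelled, and is in write-blocking copy mode. */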
static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_do_write(BlockDriverState *bs, MirrorMethod method,
                         bool copy_to_target, uint64_t offset, uint64_t bytes,
                         QEMUIOVector *qiov, int flags)
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);

    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);

    if (!copy_to_target && s->job && s->job->dirty_bitmap) {
        qatomic_set(&s->job->actively_synced, false);
        bdrv_set_dirty_bitmap(s->job->dirty_bitmap, offset, bytes);

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);

    if (copy_to_target) {
        active_write_settle(op);
static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                        QEMUIOVector *qiov, BdrvRequestFlags flags)
    QEMUIOVector bounce_qiov;

    bool copy_to_target = should_copy_to_target(bs->opaque);

    if (copy_to_target) {
        /* The guest might concurrently modify the data to write; but
         * the data on source and destination must match, so we have
         * to use a bounce buffer if we are going to write to the
         */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;

        flags &= ~BDRV_REQ_REGISTERED_BUF;

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, copy_to_target,
                                   offset, bytes, qiov, flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
static int coroutine_fn GRAPH_RDLOCK bdrv_mirror_top_flush(BlockDriverState *bs)
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
    return bdrv_co_flush(bs->backing->bs);
static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                              int64_t bytes, BdrvRequestFlags flags)
    bool copy_to_target = should_copy_to_target(bs->opaque);
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, copy_to_target,
                                    offset, bytes, NULL, flags);

static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
    bool copy_to_target = should_copy_to_target(bs->opaque);
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, copy_to_target,
                                    offset, bytes, NULL, 0);
static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
    MirrorBDSOpaque *s = bs->opaque;

        /*
         * If the job is to be stopped, we do not need to forward
         * anything to the real image.
         */
        *nshared = BLK_PERM_ALL;

    bdrv_default_perms(bs, c, role, reopen_queue,
                       perm, shared, nperm, nshared);

        /*
         * For commit jobs, we cannot take CONSISTENT_READ, because
         * that permission is unshared for everything above the base
         * node (except for filters on the base node).
         * We also have to force-share the WRITE permission, or
         * otherwise we would block ourselves at the base node (if
         * writes are blocked for a node, they are also blocked for
         * its backing file).
         * (We could also share RESIZE, because it may be needed for
         * the target if its size is less than the top node's; but
         * bdrv_default_perms_for_cow() automatically shares RESIZE
         * for backing nodes if WRITE is shared, so there is no need
         */
        *nperm &= ~BLK_PERM_CONSISTENT_READ;
        *nshared |= BLK_PERM_WRITE;
/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,

    .filtered_child_is_backing  = true,
static BlockJob *mirror_start_job(
                             const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             BlockCompletionFunc *cb,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror, MirrorCopyMode copy_mode,

    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_is_backing;
    uint64_t target_perms, target_shared_perms;

    GLOBAL_STATE_CODE();

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);

    assert(is_power_of_2(granularity));

        error_setg(errp, "Invalid parameter 'buf-size'");

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;

    bdrv_graph_rdlock_main_loop();
    if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
        error_setg(errp, "Can't mirror node into itself");
        bdrv_graph_rdunlock_main_loop();

    target_is_backing = bdrv_chain_contains(bs, target);
    bdrv_graph_rdunlock_main_loop();

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
    if (mirror_top_bs == NULL) {

    if (!filter_node_name) {
        mirror_top_bs->implicit = true;

    /* So that we can always drop this node */
    mirror_top_bs->never_freeze = true;

    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
                                          BDRV_REQ_NO_FALLBACK;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;

    bs_opaque->is_commit = target_is_backing;

    bdrv_drained_begin(bs);
    ret = bdrv_append(mirror_top_bs, bs, errp);
    bdrv_drained_end(bs);

        bdrv_unref(mirror_top_bs);

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE, speed,
                         creation_flags, cb, opaque, errp);

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there.*/

    target_perms = BLK_PERM_WRITE;
    target_shared_perms = BLK_PERM_WRITE_UNCHANGED;

    if (target_is_backing) {
        int64_t bs_size, target_size;
        bs_size = bdrv_getlength(bs);
            error_setg_errno(errp, -bs_size,
                             "Could not inquire top image size");

        target_size = bdrv_getlength(target);
        if (target_size < 0) {
            error_setg_errno(errp, -target_size,
                             "Could not inquire base image size");

        if (target_size < bs_size) {
            target_perms |= BLK_PERM_RESIZE;

        target_shared_perms |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;

    bdrv_graph_rdlock_main_loop();
    if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
        /*
         * We may want to allow this in the future, but it would
         * require taking some extra care.
         */
        error_setg(errp, "Cannot mirror to a filter on top of a node in "
                   "the source's backing chain");
        bdrv_graph_rdunlock_main_loop();
    bdrv_graph_rdunlock_main_loop();

    s->target = blk_new(s->common.job.aio_context,
                        target_perms, target_shared_perms);
    ret = blk_insert_bs(s->target, target, errp);

    /* XXX: Mirror target could be a NBD server of target QEMU in the case
     * of non-shared block migration. To allow migration completion, we
     * have to allow "inactivate" of the target BB. When that happens, we
     * know the job is drained, and the vcpus are stopped, so no write
     * operation will be performed. Block layer already has assertions to
     */
    blk_set_force_allow_inactivate(s->target);
    blk_set_allow_aio_context_change(s->target, true);
    blk_set_disable_request_queuing(s->target, true);

    bdrv_graph_rdlock_main_loop();
    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->zero_target = zero_target;
    qatomic_set(&s->copy_mode, copy_mode);
    s->base_overlay = bdrv_find_overlay(bs, base);
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    if (auto_complete) {
        s->should_complete = true;
    bdrv_graph_rdunlock_main_loop();

    s->dirty_bitmap = bdrv_create_dirty_bitmap(s->mirror_top_bs, granularity,
    if (!s->dirty_bitmap) {

    /*
     * The dirty bitmap is set by bdrv_mirror_top_do_write() when not in active
     */
    bdrv_disable_dirty_bitmap(s->dirty_bitmap);

    bdrv_graph_wrlock(bs);
    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                             BLK_PERM_CONSISTENT_READ,
        bdrv_graph_wrunlock();

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter, *filtered_target;
        uint64_t iter_shared_perms;

        /*
         * The topmost node with
         * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target)
         */
        filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target));

        assert(bdrv_skip_filters(filtered_target) ==
               bdrv_skip_filters(target));

        /*
         * XXX BLK_PERM_WRITE needs to be allowed so we don't block
         * ourselves at s->base (if writes are blocked for a node, they are
         * also blocked for its backing file). The other option would be a
         * second filter driver above s->base (== target).
         */
        iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;

        for (iter = bdrv_filter_or_cow_bs(bs); iter != target;
             iter = bdrv_filter_or_cow_bs(iter))
            if (iter == filtered_target) {
                /*
                 * From here on, all nodes are filters on the base.
                 * This allows us to share BLK_PERM_CONSISTENT_READ.
                 */
                iter_shared_perms |= BLK_PERM_CONSISTENT_READ;

            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     iter_shared_perms, errp);
                bdrv_graph_wrunlock();

        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
            bdrv_graph_wrunlock();
    bdrv_graph_wrunlock();

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);

    /* Make sure this BDS does not go away until we have completed the graph */
    bdrv_ref(mirror_top_bs);

    g_free(s->replaces);
    blk_unref(s->target);
    bs_opaque->job = NULL;
    if (s->dirty_bitmap) {
        bdrv_release_dirty_bitmap(s->dirty_bitmap);
    job_early_fail(&s->common.job);

    bs_opaque->stop = true;
    bdrv_drained_begin(bs);
    bdrv_graph_wrlock(bs);
    assert(mirror_top_bs->backing->bs == bs);
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
    bdrv_replace_node(mirror_top_bs, bs, &error_abort);
    bdrv_graph_wrunlock();
    bdrv_drained_end(bs);

    bdrv_unref(mirror_top_bs);
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
    BlockDriverState *base;

    GLOBAL_STATE_CODE();

    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));

    bdrv_graph_rdlock_main_loop();
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
    bdrv_graph_rdunlock_main_loop();

    mirror_start_job(job_id, bs, creation_flags, target, replaces,
                     speed, granularity, buf_size, backing_mode, zero_target,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, copy_mode, errp);
BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                              BlockDriverState *base, int creation_flags,
                              int64_t speed, BlockdevOnError on_error,
                              const char *filter_node_name,
                              BlockCompletionFunc *cb, void *opaque,
                              bool auto_complete, Error **errp)
    bool base_read_only;

    GLOBAL_STATE_CODE();

    base_read_only = bdrv_is_read_only(base);

    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {

    job = mirror_start_job(
                     job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN, false,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,

        goto error_restore_flags;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);