/*
 * Copyright Red Hat, Inc. 2012
 *
 * Author:
 *   Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
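
/* With the defaults above the buffer works out to 16 * 1 MiB = 16 MiB per
 * job; a caller-supplied buf-size is rounded up to the bitmap granularity
 * in mirror_start_job() below. */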

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
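
/* Rough illustration: with a 64 KiB granularity and the default 16 MiB
 * buffer, mirror_free_init() below carves s->buf into 256 chunks and stores
 * the MirrorBuffer link in the first bytes of each free chunk, so the free
 * list needs no separate allocation. */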

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(MirrorOpList, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    CoQueue waiting_requests;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}
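
/* For example, a failing read with on-source-error=stop is expected to
 * yield BLOCK_ERROR_ACTION_STOP here, pausing the job with its iostatus
 * set, while on-source-error=ignore lets the iteration continue; the
 * affected chunk is re-marked dirty by the caller and retried later. */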

static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                qemu_co_queue_wait(&op->waiting_requests, NULL);
                break;
            }
        }
    }
}
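
/* Worked example: with a 64 KiB granularity, offset = 96 KiB and
 * bytes = 64 KiB give self_start_chunk = 1 and self_end_chunk =
 * DIV_ROUND_UP(160 KiB, 64 KiB) = 3, i.e. chunks 1 and 2; any in-flight op
 * whose chunk range overlaps [1, 3) forces this request to wait. */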

static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}
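
/* Note: this is the single completion point for every MirrorOp, so the
 * accounting here (s->in_flight--, s->bytes_in_flight decrement, buf_free
 * refill) must mirror the increments made in mirror_co_read(),
 * mirror_co_zero() and mirror_co_discard(); each op reaches it exactly
 * once. */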

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        ret = blk_co_pwritev(s->target, op->offset,
                             op->qiov.size, &op->qiov, 0);
        mirror_write_complete(op, ret);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;

    return ret;
}

static inline void mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /* Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function. Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on. */
        if (!op->is_pseudo_op) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment. This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_zero()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}
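
/* A typical call, as in mirror_iteration() below:
 *
 *     io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
 *
 * The returned byte count may differ from the requested one (buffer-size,
 * max-iov and COW-alignment limits), so callers advance by the return
 * value rather than by their input. */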

static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it. mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}
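
/* Decision sketch for the loop above: a range reported as BDRV_BLOCK_ZERO
 * and exactly cluster-aligned on the target becomes MIRROR_METHOD_ZERO, an
 * unallocated range that does not report zeroes becomes
 * MIRROR_METHOD_DISCARD, and everything else falls back to
 * MIRROR_METHOD_COPY through mirror_co_read(). */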

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(Job *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorExitData *data = opaque;
    MirrorBDSOpaque *bs_opaque = s->mirror_top_bs->opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away before we called
     * job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(bjob);
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(bjob->blk);
    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

    bs_opaque->job = NULL;
    job_completed(job, data->ret, NULL);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}

static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}
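
/* In other words, a cooperative scheduling point: roughly every
 * BLOCK_JOB_SLICE_TIME (100 ms at the time of writing) the coroutine gives
 * up its slice via job_sleep_ns(), and otherwise it merely offers a pause
 * point for job_pause()/job_cancel(). */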

static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap. */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}
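
/* For sync=full (base == NULL) the allocation query covers the whole
 * backing chain, so any allocated range is marked dirty and typically most
 * of the device ends up queued for copying; for sync=top only the ranges
 * allocated in the top layer (ret == 1 from bdrv_is_allocated_above())
 * are marked. */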

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for an empty string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF,
                               NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->synced = true;
        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        s->common.job.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW. Instead, we copy sectors around the
     * dirty data if needed. We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                    continue;
                }
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                job_is_cancelled(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while the mirror
             * job runs, so pause it now. Before deciding whether to switch
             * to target check one last time if I/O has come in the
             * meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync. Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.job.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;

        if (s->synced && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        if (job_is_cancelled(&s->common.job) &&
            (!s->synced || s->common.job.force_cancel))
        {
            break;
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong. Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
               job_is_cancelled(&s->common.job)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    job_defer_to_main_loop(&s->common.job, mirror_exit, data);
}

static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    job_enter(job);
}

static void mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    return !!s->in_flight;
}

static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}

static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .start                  = mirror_run,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .start                  = mirror_run,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_co_pdiscard(bs->backing->bs, offset, bytes);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    bdrv_refresh_filename(bs->backing->bs);
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_close(BlockDriverState *bs)
{
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}
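
/* Effect sketch: if a parent requests BLK_PERM_WRITE on the filter, only
 * that permission is forwarded to the backing child; everything else is
 * dropped, and *nshared = BLK_PERM_ALL keeps the filter from blocking
 * other users of the chain (such as the job's own target BlockBackend
 * during active commit). */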

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_block_status       = bdrv_co_block_status_from_backing,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_close                 = bdrv_mirror_top_close,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};

static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror,
                             Error **errp)
{
    MirrorBlockJob *s;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }
    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;
    bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() succeeds even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    bs_opaque->job = s;

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be a NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB. When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other options would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);
    return;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        job_early_fail(&s->common.job);
    }

    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);
}

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, errp);
}
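
/* This is the entry point behind the QMP drive-mirror/blockdev-mirror
 * commands; a minimal invocation looks roughly like
 *
 *   { "execute": "blockdev-mirror",
 *     "arguments": { "device": "drive0", "target": "target0",
 *                    "sync": "full" } }
 *
 * where the node names are placeholders. */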

void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque,
                         bool auto_complete, Error **errp)
{
    int orig_base_flags;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}