/*
 * Copyright Red Hat, Inc. 2012
 *
 * Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
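/* With the defaults above the mirror buffer is 16 * 1 MiB = 16 MiB in total;
 * mirror_start_job() may round this up to the job granularity. */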
/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
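/* The MirrorBuffer header is overlaid on the free chunk's own bytes inside
 * s->buf (see mirror_free_init() below), so the free list needs no extra
 * memory. */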
typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *source;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;
typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;
} MirrorOp;
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}
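/* Completion path of one copy operation: blk_aio_preadv() lands in
 * mirror_read_complete(), which on success submits the matching target write;
 * mirror_write_complete() then funnels either outcome into
 * mirror_iteration_done() below. */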
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.job.co);
    }
}
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        blk_aio_pwritev(s->target, op->offset, &op->qiov,
                        0, mirror_write_complete, op);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}
/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}
/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}
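/* Illustrative numbers (assuming a 64 KiB granularity and a 128 KiB target
 * cluster): a request for [192 KiB, 256 KiB) whose cluster is not yet in the
 * COW bitmap is widened to [128 KiB, 256 KiB); the returned tail adjustment
 * is 0, and the extra 64 KiB copied before *offset is not counted. */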
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}
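/* The matching wakeup is the qemu_coroutine_enter() call in
 * mirror_iteration_done(): each completed request re-enters the job coroutine
 * if it is parked here waiting for free buffers or an in-flight slot. */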
/* Submit async read while handling COW.
 * Returns: The number of bytes copied after and including offset,
 *          excluding any bytes copied prior to offset due to alignment.
 *          This will be @bytes if no alignment is necessary, or
 *          (new_end - offset) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static uint64_t mirror_do_read(MirrorBlockJob *s, int64_t offset,
                               uint64_t bytes)
{
    BlockBackend *source = s->common.blk;
    int nb_chunks;
    uint64_t ret;
    MirrorOp *op;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    bytes = MIN(s->buf_size, MIN(max_bytes, bytes));
    assert(bytes);
    assert(bytes < BDRV_REQUEST_MAX_BYTES);
    ret = bytes;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &offset, &bytes);
    }
    assert(bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->bytes_in_flight += bytes;
    trace_mirror_one_iteration(s, offset, bytes);

    blk_aio_preadv(source, offset, &op->qiov, 0, mirror_read_complete, op);
    return ret;
}
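/* The in-flight accounting is symmetric: s->in_flight and s->bytes_in_flight
 * are incremented here and in mirror_do_zero_or_discard(), and decremented
 * again in mirror_iteration_done(). */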
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t offset,
                                      uint64_t bytes,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    s->in_flight++;
    s->bytes_in_flight += bytes;
    if (is_discard) {
        blk_aio_pdiscard(s->target, offset,
                         op->bytes, mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, offset,
                              op->bytes, s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t offset, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    first_chunk = offset / s->granularity;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_bytes = io_bytes_acct = mirror_do_read(s, offset, io_bytes);
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, offset, io_bytes,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_bytes_acct = 0;
            } else {
                io_bytes_acct = io_bytes;
            }
            break;
        default:
            abort();
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }
    return delay_ns;
}
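/* The returned delay_ns is the rate-limit debt for the bytes submitted in
 * this pass; mirror_run() sleeps for that long before calling
 * mirror_iteration() again. */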
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
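/* For example, a default 16 MiB buffer split at a 64 KiB granularity yields
 * 256 free chunks (both values are configurable, so the numbers are only
 * illustrative). */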
/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}
typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(Job *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away until we have called
     * job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(bjob);
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(bjob->blk);
    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

    job_completed(job, data->ret);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}
static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}
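/* mirror_throttle() yields at most once per BLOCK_JOB_SLICE_TIME so that the
 * bulk loops in mirror_dirty_init() below stay responsive to pause and
 * cancel requests. */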
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_do_zero_or_discard(s, offset, bytes, false);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap. */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}
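/* In short: when the target cannot be assumed to read as zeroes, either mark
 * the whole device dirty (if efficient zeroing is unavailable) or zero it up
 * front; afterwards only ranges allocated above @base are marked dirty. */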
/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF,
                               NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for completion. */
        job_transition_to_ready(&s->common.job);
        s->synced = true;
        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        s->common.job.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW. Instead, we copy sectors around the
     * dirty data if needed. We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                    continue;
                }
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                job_is_cancelled(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now. Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync. Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.job.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;

        if (s->synced && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        if (job_is_cancelled(&s->common.job) &&
            (!s->synced || s->common.job.force_cancel)) {
            break;
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong. Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
               job_is_cancelled(&s->common.job)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    job_defer_to_main_loop(&s->common.job, mirror_exit, data);
}
static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    job_enter(job);
}
static void mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}
static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}
static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}
static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .start                  = mirror_run,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};
static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .start                  = mirror_run,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};
static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}
static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
}
static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}
static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
}
static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_co_pdiscard(bs->backing->bs, offset, bytes);
}
static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    bdrv_refresh_filename(bs->backing->bs);
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}
static void bdrv_mirror_top_close(BlockDriverState *bs)
{
}
static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}
/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_block_status       = bdrv_co_block_status_from_backing,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_close                 = bdrv_mirror_top_close,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};
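/* All requests are forwarded straight to bs->backing, so the filter is
 * transparent to guest I/O; bdrv_mirror_top_child_perm() above controls the
 * permissions it imposes on (and shares with) the backing chain. */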
static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror,
                             Error **errp)
{
    MirrorBlockJob *s;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }
    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED;
    bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() succeeds even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->source = bs;
    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be a NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB. When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->unmap = unmap;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other options would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);
    return;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        job_early_fail(&s->common.job);
    }

    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);
}
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, errp);
}
void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque,
                         bool auto_complete, Error **errp)
{
    int orig_base_flags;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}