/*
 * Copyright Red Hat, Inc. 2012
 *
 * Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define DEFAULT_MIRROR_BUF_SIZE (10 << 20)
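/* Tuning knobs: SLICE_TIME bounds how long the job works before sleeping or
 * yielding, MAX_IN_FLIGHT caps the number of concurrent copy operations, and
 * DEFAULT_MIRROR_BUF_SIZE (10 MB) sizes the copy buffer that
 * mirror_free_init() below carves into granularity-sized chunks. */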
/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
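/* State of a running mirror job: the source is reached through the common
 * BlockJob, the target through its own BlockBackend, and progress is
 * tracked by the dirty, COW and in-flight bitmaps below. */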
typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    unsigned long *in_flight_bitmap;
    int in_flight;
    int sectors_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_sectors;
    int max_iov;
} MirrorBlockJob;
typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;
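/* Decide how to react to a failed request: consult the on_source_error or
 * on_target_error policy depending on whether this was a read from the
 * source or a write to the target. */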
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}
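/* Completion path shared by reads, writes, zeroes and discards: return the
 * op's buffer chunks to the free list, clear its in_flight_bitmap bits,
 * account progress, and wake up the job coroutine if it is waiting. */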
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = DIV_ROUND_UP(op->nb_sectors, sectors_per_chunk);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
    }

    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co, NULL);
    }
}
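/* AIO callbacks: a failed write (or read) re-dirties the affected sectors so
 * they are retried on a later iteration; a successful read is chained
 * straight into the corresponding write to the target. */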
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
                    0, mirror_write_complete, op);
}
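/* Never copy past the end of the source device. */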
static inline void mirror_clip_sectors(MirrorBlockJob *s,
                                       int64_t sector_num,
                                       int *nb_sectors)
{
    *nb_sectors = MIN(*nb_sectors,
                      s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
}
/* Round sector_num and/or nb_sectors to target cluster if COW is needed, and
 * return the offset of the adjusted tail sector against original. */
static int mirror_cow_align(MirrorBlockJob *s,
                            int64_t *sector_num,
                            int *nb_sectors)
{
    bool need_cow;
    int ret = 0;
    int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS;
    int64_t align_sector_num = *sector_num;
    int align_nb_sectors = *nb_sectors;
    int max_sectors = chunk_sectors * s->max_iov;

    need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap);
    need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num,
                                       *nb_sectors, &align_sector_num,
                                       &align_nb_sectors);
    }

    if (align_nb_sectors > max_sectors) {
        align_nb_sectors = max_sectors;
        if (need_cow) {
            align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors,
                                               s->target_cluster_sectors);
        }
    }

    /* Clipping may result in align_nb_sectors unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    mirror_clip_sectors(s, align_sector_num, &align_nb_sectors);

    ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors);
    *sector_num = align_sector_num;
    *nb_sectors = align_nb_sectors;
    assert(ret >= 0);
    return ret;
}
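/* Yield until mirror_iteration_done() re-enters the job coroutine;
 * waiting_for_io tells the completion path that a wake-up is wanted. */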
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}
/* Submit async read while handling COW.
 * Returns: nb_sectors if no alignment is necessary, or
 *          (new_end - sector_num) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
                          int nb_sectors)
{
    BlockBackend *source = s->common.blk;
    int sectors_per_chunk, nb_chunks;
    int ret = nb_sectors;
    MirrorOp *op;

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;

    /* We can only handle as much as buf_size at a time. */
    nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
    assert(nb_sectors);

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &sector_num, &nb_sectors);
    }
    assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
    /* The sector range must meet granularity because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(!(sector_num % sectors_per_chunk));
    nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0,
                   mirror_read_complete, op);
    return ret;
}
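/* Submit an asynchronous write_zeroes or discard to the target; no buffer
 * chunks are needed because nothing is read from the source. */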
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t sector_num,
                                      int nb_sectors,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    if (is_discard) {
        blk_aio_discard(s->target, sector_num, op->nb_sectors,
                        mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
                              op->nb_sectors * BDRV_SECTOR_SIZE,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}
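/* One pass of the copy loop: pick the next dirty extent from the bitmap
 * iterator, widen it over consecutive dirty chunks up to buf_size, then
 * copy, zero or discard it depending on the source's block status.
 * Returns the rate-limiting delay in nanoseconds. */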
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = blk_bs(s->common.blk);
    int64_t sector_num, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;

    sector_num = hbitmap_iter_next(&s->hbi);
    if (sector_num < 0) {
        bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
        sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(sector_num >= 0);
    }

    first_chunk = sector_num / sectors_per_chunk;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, first_chunk, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
        int64_t hbitmap_next;
        int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
        int64_t next_chunk = next_sector / sectors_per_chunk;
        if (next_sector >= end ||
            !bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        hbitmap_next = hbitmap_iter_next(&s->hbi);
        if (hbitmap_next > next_sector || hbitmap_next < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(&s->hbi, next_sector);
            hbitmap_next = hbitmap_iter_next(&s->hbi);
        }
        assert(hbitmap_next == next_sector);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num,
                            nb_chunks * sectors_per_chunk);
    bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
    while (nb_chunks > 0 && sector_num < end) {
        int ret;
        int io_sectors;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(sector_num % sectors_per_chunk));
        ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        if (ret < 0) {
            io_sectors = nb_chunks * sectors_per_chunk;
        }

        io_sectors -= io_sectors % sectors_per_chunk;
        if (io_sectors < sectors_per_chunk) {
            io_sectors = sectors_per_chunk;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_sector_num;
            int target_nb_sectors;
            bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
                                           io_sectors, &target_sector_num,
                                           &target_nb_sectors);
            if (target_sector_num == sector_num &&
                target_nb_sectors == io_sectors) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        mirror_clip_sectors(s, sector_num, &io_sectors);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_sectors = mirror_do_read(s, sector_num, io_sectors);
            break;
        case MIRROR_METHOD_ZERO:
            mirror_do_zero_or_discard(s, sector_num, io_sectors, false);
            break;
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, sector_num, io_sectors, true);
            break;
        default:
            abort();
        }
        assert(io_sectors);
        sector_num += io_sectors;
        nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
        delay_ns += ratelimit_calculate_delay(&s->limit, io_sectors);
    }
    return delay_ns;
}
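/* Slice s->buf into granularity-sized chunks and thread them onto the
 * buf_free list. */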
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}
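/* The coroutine's result is handed to mirror_exit, which runs in the main
 * loop after the job coroutine has finished: it optionally swaps the target
 * into the source's place in the graph, then drops every reference and
 * blocker the job took. */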
typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = blk_bs(s->common.blk);
    BlockDriverState *target_bs = blk_bs(s->target);

    /* Make sure that the source BDS doesn't go away before we called
     * block_job_completed(). */
    bdrv_ref(src);

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_in_backing_chain(to_replace, target_bs);
        bdrv_drained_end(target_bs);

        /* We just changed the BDS the job BB refers to */
        blk_remove_bs(job->blk);
        blk_insert_bs(job->blk, src);
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_op_unblock_all(target_bs, s->common.blocker);
    blk_unref(s->target);
    block_job_completed(&s->common, data->ret);
    g_free(data);
    bdrv_drained_end(src);
    if (qemu_get_aio_context() == bdrv_get_aio_context(src)) {
        aio_enable_external(iohandler_get_aio_context());
    }
    bdrv_unref(src);
}
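/* The body of the job coroutine: size the buffers and bitmaps, seed the
 * dirty bitmap for full/top sync, then loop copying dirty data until the
 * job is cancelled or completed. */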
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = blk_bs(s->common.blk);
    BlockDriverState *target_bs = blk_bs(s->target);
    int64_t sector_num, end, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;
    int n;
    int target_cluster_size = BDRV_SECTOR_SIZE;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    } else if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        target_cluster_size = bdi.cluster_size;
    }
    if (backing_filename[0] && !target_bs->backing
        && s->granularity < target_cluster_size) {
        s->buf_size = MAX(s->buf_size, target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS;
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    end = s->bdev_length / BDRV_SECTOR_SIZE;
    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap. */
        BlockDriverState *base = s->base;
        bool mark_all_dirty = s->base == NULL && !bdrv_has_zero_init(target_bs);

        for (sector_num = 0; sector_num < end; ) {
            /* Just to make sure we are not exceeding int limit. */
            int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                                 end - sector_num);
            int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

            if (now - last_pause_ns > SLICE_TIME) {
                last_pause_ns = now;
                block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
            } else {
                block_job_pause_point(&s->common);
            }

            if (block_job_is_cancelled(&s->common)) {
                goto immediate_exit;
            }

            ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1 || mark_all_dirty) {
                bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
            }
            sector_num += n;
        }
    }

    bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        block_job_pause_point(&s->common);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = blk_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                if (!s->synced) {
                    block_job_event_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_co_drain(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    /* Before we switch to target in mirror_exit, make sure data doesn't
     * get written in the middle. */
    bdrv_drained_begin(bs);
    if (qemu_get_aio_context() == bdrv_get_aio_context(bs)) {
        /* FIXME: virtio host notifiers run on iohandler_ctx, therefore the
         * above bdrv_drained_end isn't enough to quiesce it. This is ugly, we
         * need a block layer API change to achieve this. */
        aio_disable_external(iohandler_get_aio_context());
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}
static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}
static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockDriverState *src, *target;

    src = blk_bs(job->blk);
    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, QERR_BLOCK_JOB_NOT_READY, job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* check the target bs is not blocked and block all operations on it */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target) != backing) {
            bdrv_set_backing_hd(target, backing);
        }
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}
/* There is no matching mirror_resume() because mirror_run() will begin
 * iterating again when the job is resumed.
 */
static void coroutine_fn mirror_pause(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    mirror_drain(s);
}
static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}
static const BlockJobDriver mirror_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_MIRROR,
    .set_speed              = mirror_set_speed,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
};
static const BlockJobDriver commit_active_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_COMMIT,
    .set_speed              = mirror_set_speed,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
};
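/* Common setup for drive-mirror and active commit: validate granularity and
 * buffer size, create the job, wire up the target BlockBackend and the dirty
 * bitmap, and kick off mirror_run() in a coroutine. */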
static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             const char *replaces,
                             int64_t speed, uint32_t granularity,
                             int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->target = blk_new();
    blk_insert_bs(s->target, target);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        g_free(s->replaces);
        blk_unref(s->target);
        block_job_unref(&s->common);
        return;
    }

    bdrv_op_block_all(target, s->common.blocker);

    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}
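/* QMP drive-mirror entry point; the sync mode chooses whether the dirty
 * bitmap starts empty (none) or is seeded in mirror_run() from the
 * allocation status of the whole chain (full) or the top image only (top). */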
void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(bs, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}
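/* Active commit reuses the mirror machinery with the backing image as the
 * "target": the base is grown if the top image is larger, and its original
 * open flags are restored if anything fails. */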
void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    mirror_start_job(bs, base, NULL, speed, 0, 0, MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, false, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}