/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define MAX_IO_SECTORS ((1 << 20) >> BDRV_SECTOR_BITS) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE \
    (MAX_IN_FLIGHT * MAX_IO_SECTORS * BDRV_SECTOR_SIZE)
/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
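
/* State of a single mirror block job. The same structure backs both
 * drive-mirror and active commit, which differ only in their
 * BlockJobDriver and in how the job is started. */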
typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *source;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t sectors_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_sectors;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;
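
/* A single in-flight copy operation. One MirrorOp is allocated per
 * asynchronous read/write (or zero/discard) and freed again in
 * mirror_iteration_done(). */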
typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}
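
/* Completion callback shared by all operation types: returns the op's buffer
 * chunks to the free list, clears the covered chunks in the in-flight bitmap,
 * updates the progress counter and wakes the job coroutine if it is blocked
 * in mirror_wait_for_io(). */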
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num * BDRV_SECTOR_SIZE,
                                op->nb_sectors * BDRV_SECTOR_SIZE, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = DIV_ROUND_UP(op->nb_sectors, sectors_per_chunk);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
        }
    }
    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co);
    }
}
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
                        0, mirror_write_complete, op);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}
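
/* Clip *nb_sectors so that the request does not run past the end of the
 * device. */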
static inline void mirror_clip_sectors(MirrorBlockJob *s,
                                       int64_t sector_num,
                                       int *nb_sectors)
{
    *nb_sectors = MIN(*nb_sectors,
                      s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
}
/* Round sector_num and/or nb_sectors to target cluster if COW is needed, and
 * return the offset of the adjusted tail sector against original. */
static int mirror_cow_align(MirrorBlockJob *s,
                            int64_t *sector_num,
                            int *nb_sectors)
{
    bool need_cow;
    int ret = 0;
    int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS;
    int64_t align_sector_num = *sector_num;
    int align_nb_sectors = *nb_sectors;
    int max_sectors = chunk_sectors * s->max_iov;

    need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap);
    need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num,
                                       *nb_sectors, &align_sector_num,
                                       &align_nb_sectors);
    }

    if (align_nb_sectors > max_sectors) {
        align_nb_sectors = max_sectors;
        if (need_cow) {
            align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors,
                                               s->target_cluster_sectors);
        }
    }
    /* Clipping may result in align_nb_sectors unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    mirror_clip_sectors(s, align_sector_num, &align_nb_sectors);

    ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors);
    *sector_num = align_sector_num;
    *nb_sectors = align_nb_sectors;
    assert(ret >= 0);
    return ret;
}
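
/* Yield until mirror_iteration_done() wakes us up again. Callers must
 * re-check their wait condition in a loop, because the wakeup is triggered
 * by the completion of any in-flight operation. */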
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}
/* Submit async read while handling COW.
 * Returns: The number of sectors copied after and including sector_num,
 *          excluding any sectors copied prior to sector_num due to alignment.
 *          This will be nb_sectors if no alignment is necessary, or
 *          (new_end - sector_num) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
                          int nb_sectors)
{
    BlockBackend *source = s->common.blk;
    int sectors_per_chunk, nb_chunks;
    int ret;
    MirrorOp *op;
    int max_sectors;

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    max_sectors = sectors_per_chunk * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
    nb_sectors = MIN(max_sectors, nb_sectors);
    assert(nb_sectors);
    ret = nb_sectors;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &sector_num, &nb_sectors);
    }
    assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
    /* The sector range must meet granularity because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(!(sector_num % sectors_per_chunk));
    nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, sector_num * BDRV_SECTOR_SIZE,
                                     s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num * BDRV_SECTOR_SIZE,
                               nb_sectors * BDRV_SECTOR_SIZE);

    blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0,
                   mirror_read_complete, op);
    return ret;
}
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t sector_num,
                                      int nb_sectors,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    if (is_discard) {
        blk_aio_pdiscard(s->target, sector_num << BDRV_SECTOR_BITS,
                         op->nb_sectors << BDRV_SECTOR_BITS,
                         mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
                              op->nb_sectors * BDRV_SECTOR_SIZE,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}
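
/* One iteration of the main copy loop: pick the next dirty extent from the
 * dirty bitmap, extend it over consecutive dirty chunks up to buf_size, and
 * issue copy, write-zeroes or discard operations depending on the block
 * status of the source. Returns the delay (in ns) requested by the rate
 * limiter, or 0. */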
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t sector_num, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_sectors = MAX((s->buf_size >> BDRV_SECTOR_BITS) / MAX_IN_FLIGHT,
                             MAX_IO_SECTORS);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    sector_num = bdrv_dirty_iter_next(s->dbi);
    if (sector_num < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        sector_num = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap) *
                                  BDRV_SECTOR_SIZE);
        assert(sector_num >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    first_chunk = sector_num / sectors_per_chunk;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num * BDRV_SECTOR_SIZE,
                                     s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
        int64_t next_dirty;
        int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
        int64_t next_chunk = next_sector / sectors_per_chunk;
        if (next_sector >= end ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_sector)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_sector || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_sector);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_sector);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, sector_num,
                                   nb_chunks * sectors_per_chunk);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
    while (nb_chunks > 0 && sector_num < end) {
        int64_t ret;
        int io_sectors;
        int64_t io_bytes_acct;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(sector_num % sectors_per_chunk));
        ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        if (ret < 0) {
            io_sectors = MIN(nb_chunks * sectors_per_chunk, max_io_sectors);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_sectors = MIN(io_sectors, max_io_sectors);
        }

        io_sectors -= io_sectors % sectors_per_chunk;
        if (io_sectors < sectors_per_chunk) {
            io_sectors = sectors_per_chunk;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_sector_num;
            int target_nb_sectors;
            bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
                                           io_sectors, &target_sector_num,
                                           &target_nb_sectors);
            if (target_sector_num == sector_num &&
                target_nb_sectors == io_sectors) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, sector_num * BDRV_SECTOR_SIZE,
                                         s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        mirror_clip_sectors(s, sector_num, &io_sectors);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_sectors = mirror_do_read(s, sector_num, io_sectors);
            io_bytes_acct = io_sectors * BDRV_SECTOR_SIZE;
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, sector_num, io_sectors,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_bytes_acct = 0;
            } else {
                io_bytes_acct = io_sectors * BDRV_SECTOR_SIZE;
            }
            break;
        default:
            abort();
        }
        assert(io_sectors);
        sector_num += io_sectors;
        nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
        if (s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, io_bytes_acct);
        }
    }
    return delay_ns;
}
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}
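
/* Job completion data and callback. mirror_exit() runs in the main loop via
 * block_job_defer_to_main_loop(); it performs the final graph switch
 * (bdrv_replace_node) and removes the mirror_top filter node again. */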
typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away before we called
     * block_job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(job);
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(job->blk);
    blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(job->blk, mirror_top_bs, &error_abort);

    block_job_completed(&s->common, data->ret);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}
static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > SLICE_TIME) {
        s->last_pause_ns = now;
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
    } else {
        block_job_pause_point(&s->common);
    }
}
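
/* Prepare the dirty bitmap for the bulk phase: pre-zero the target if it has
 * no zero initialization guarantee, then mark dirty every extent that is
 * allocated in the source (above the base, if one is given). */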
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t sector_num, end;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret, n;

    end = s->bdev_length / BDRV_SECTOR_SIZE;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, end);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (sector_num = 0; sector_num < end; ) {
            int nb_sectors = MIN(end - sector_num,
                QEMU_ALIGN_DOWN(INT_MAX, s->granularity) >> BDRV_SECTOR_BITS);

            mirror_throttle(s);

            if (block_job_is_cancelled(&s->common)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_do_zero_or_discard(s, sector_num, nb_sectors, false);
            sector_num += nb_sectors;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap. */
    for (sector_num = 0; sector_num < end; ) {
        /* Just to make sure we are not exceeding int limit. */
        int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                             end - sector_num);

        mirror_throttle(s);

        if (block_job_is_cancelled(&s->common)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);
        if (ret < 0) {
            return ret;
        }

        assert(n > 0);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
        }
        sector_num += n;
    }
    return 0;
}
/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}
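
/* The main mirror coroutine. After the optional bulk phase performed by
 * mirror_dirty_init(), it loops over mirror_iteration() until source and
 * target converge, emits BLOCK_JOB_READY once they are in sync, and finally
 * defers the graph switch to mirror_exit() in the main loop. */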
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;
    int target_cluster_size = BDRV_SECTOR_SIZE;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        target_cluster_size = bdi.cluster_size;
    }
    if (backing_filename[0] && !target_bs->backing
        && s->granularity < target_cluster_size) {
        s->buf_size = MAX(s->buf_size, target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS;
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || block_job_is_cancelled(&s->common)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap, 0);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        block_job_pause_point(&s->common);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt * BDRV_SECTOR_SIZE,
                                   s->buf_free_count, s->in_flight);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret. */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                block_job_event_ready(&s->common);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                block_job_is_cancelled(&s->common);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt * BDRV_SECTOR_SIZE);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt * BDRV_SECTOR_SIZE,
                                  s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}
static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
}
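
/* .complete callback, invoked by block-job-complete. Opens the target's
 * backing chain if requested, takes a reference and an op blocker on the
 * node to be replaced, and asks the job coroutine to finish. */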
static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}
static void mirror_pause(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    mirror_wait_for_all_io(s);
}
static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}
static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}
static const BlockJobDriver mirror_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_MIRROR,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};
static const BlockJobDriver commit_active_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_COMMIT,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};
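
/* The following functions implement the bdrv_mirror_top filter node. It
 * simply forwards every request to its backing file; its job is to own the
 * permissions on the source chain and, for active commit, to provide
 * consistent reads from the top layer. */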
static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}
bdrv_mirror_top_pwritev(BlockDriverState
*bs
,
1054 uint64_t offset
, uint64_t bytes
, QEMUIOVector
*qiov
, int flags
)
1056 return bdrv_co_pwritev(bs
->backing
, offset
, bytes
, qiov
, flags
);
1059 static int coroutine_fn
bdrv_mirror_top_flush(BlockDriverState
*bs
)
1061 return bdrv_co_flush(bs
->backing
->bs
);
static int64_t coroutine_fn bdrv_mirror_top_get_block_status(
    BlockDriverState *bs, int64_t sector_num, int nb_sectors, int *pnum,
    BlockDriverState **file)
{
    *pnum = nb_sectors;
    *file = bs->backing->bs;
    return BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID |
           (sector_num << BDRV_SECTOR_BITS);
}
static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
}
static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_co_pdiscard(bs->backing->bs, offset, bytes);
}
static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    bdrv_refresh_filename(bs->backing->bs);
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}
static void bdrv_mirror_top_close(BlockDriverState *bs)
{
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}
/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_get_block_status   = bdrv_mirror_top_get_block_status,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_close                 = bdrv_mirror_top_close,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};
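
/* Common setup for mirror_start() and commit_active_start(): insert the
 * mirror_top filter above @bs, create the block job with the appropriate
 * permissions on the source, attach the target BlockBackend, and start the
 * job coroutine. */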
static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             Error **errp)
{
    MirrorBlockJob *s;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return;
    }
    mirror_top_bs->total_sectors = bs->total_sectors;
    bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() succeeds even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->source = bs;
    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there.*/
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other options would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    trace_mirror_start(bs, s, opaque);
    block_job_start(&s->common);
    return;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        block_job_early_fail(&s->common);
    }

    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);
}
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, errp);
}
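
/* Active commit reuses the mirror job with the base image as the target; the
 * base is temporarily reopened read-write and its original flags are
 * restored if starting the job fails. */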
void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque,
                         bool auto_complete, Error **errp)
{
    int orig_base_flags;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}