/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#include "qemu/memalign.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
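
/* With the defaults above, the mirror buffer is MAX_IN_FLIGHT * MAX_IO_BYTES
 * = 16 MiB; mirror_free_init() below carves it into granularity-sized chunks,
 * and MAX_IN_FLIGHT bounds the number of concurrent background copy
 * operations. */
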
/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;
    BlockDriverState *base_overlay;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    unsigned in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
    bool is_commit;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    bool is_in_flight;
    CoQueue waiting_requests;
    Coroutine *co;
    MirrorOp *waiting_for_op;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;
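
/* A MirrorMethod selects how a range is propagated to the target: a plain
 * copy, an efficient write-zeroes, or a discard.  It is dispatched in
 * mirror_perform() for background copying, and in do_sync_target_write() and
 * bdrv_mirror_top_do_write() for active (write-blocking) mirroring. */
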
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                if (self) {
                    /*
                     * If the operation is already (indirectly) waiting for us,
                     * or will wait for us as soon as it wakes up, then just go
                     * on (instead of producing a deadlock in the former case).
                     */
                    if (op->waiting_for_op) {
                        continue;
                    }

                    self->waiting_for_op = op;
                }

                qemu_co_queue_wait(&op->waiting_requests, NULL);

                if (self) {
                    self->waiting_for_op = NULL;
                }

                break;
            }
        }
    }
}

static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}
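
/* An illustrative example (not from the original source): with a 64 KiB
 * granularity and a 128 KiB target cluster size, a copy of [0, 64 KiB)
 * whose surrounding cluster has not been copied yet (need_cow) is widened
 * by bdrv_round_to_clusters() to [0, 128 KiB); mirror_cow_align() then
 * returns 64 KiB, the amount added past the caller's original end, and
 * *bytes becomes 128 KiB. */
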
static inline void coroutine_fn
mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /* Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function.  Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on. */
        if (!op->is_pseudo_op && op->is_in_flight &&
            op->is_active_write == active)
        {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    /* Only non-active operations use up in-flight slots */
    mirror_wait_for_any_operation(s, false);
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment.  This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    op->is_in_flight = true;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }
    op->co = co;

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_read()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it.  mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);

        if (bdrv_cow_bs(unfiltered_target) != backing) {
            bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                local_err = NULL;
                ret = -EPERM;
            }
        }
    } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        assert(!bdrv_backing_chain_next(target_bs));
        ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL,
                                     "backing", &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            local_err = NULL;
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        /*
         * Cannot use check_to_replace_node() here, because that would
         * check for an op blocker on @to_replace, and we have our own
         * there.
         */
        if (bdrv_recurse_can_replace(src, to_replace)) {
            bdrv_replace_node(to_replace, target_bs, &local_err);
        } else {
            error_setg(&local_err, "Can no longer replace '%s' by '%s', "
                       "because it can no longer be guaranteed that doing so "
                       "would not lead to an abrupt change of visible data",
                       to_replace->node_name, target_bs->node_name);
        }
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap.  */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset, bytes,
                                      &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret > 0) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    int64_t target_length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    target_length = blk_getlength(s->target);
    if (target_length < 0) {
        ret = target_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        if (s->bdev_length > target_length) {
            ret = blk_truncate(s->target, s->bdev_length, false,
                               PREALLOC_MODE_OFF, 0, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    } else if (s->bdev_length != target_length) {
        error_setg(errp, "Source and target image have different sizes");
        ret = -EINVAL;
        goto immediate_exit;
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->actively_synced = true;
        while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        /* Do not start passive operations while there are active
         * writes in progress */
        while (s->in_active_write_counter) {
            mirror_wait_for_any_operation(s, true);
        }

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        if (job_is_cancelled(&s->common.job)) {
            ret = 0;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!job_is_ready(&s->common.job)) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_cancel_requested(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            need_drain = false;
            break;
        }

        if (job_is_ready(&s->common.job) && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
                                  delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || job_is_cancelled(&s->common.job));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}

static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    if (!job_is_ready(job)) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into child freeze system. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;

    /* If the job is paused, it will be re-entered when it is resumed */
    if (!job->paused) {
        job_enter(job);
    }
}

static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job isn't paused nor cancelled, we can't be sure that it won't
     * issue more requests. We make an exception if we've reached this point
     * from one of our own drain sections, to avoid a deadlock waiting for
     * ourselves.
     */
    if (!s->common.job.paused && !job_is_cancelled(&job->job) && !s->in_drain) {
        return true;
    }

    return !!s->in_flight;
}

static bool mirror_cancel(Job *job, bool force)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target = blk_bs(s->target);

    /*
     * Before the job is READY, we treat any cancellation like a
     * force-cancellation.
     */
    force = force || !job_is_ready(job);

    if (force) {
        bdrv_cancel_in_flight(target);
    }
    return force;
}

static bool commit_active_cancel(Job *job, bool force)
{
    /* Same as above in mirror_cancel() */
    return force || !job_is_ready(job);
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = mirror_cancel,
    },
    .drained_poll               = mirror_drained_poll,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = commit_active_cancel,
    },
    .drained_poll               = mirror_drained_poll,
};

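/* Active-mirror path: when copy_mode is write-blocking, guest writes that
 * pass through the mirror_top filter are replicated to the target
 * synchronously.  bdrv_mirror_top_do_write() below brackets the write with
 * active_write_prepare() and active_write_settle(), and
 * do_sync_target_write() performs the replication, shrinking any unaligned
 * head or tail that is still dirty anyway. */
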
static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
{
    int ret;
    size_t qiov_offset = 0;
    int64_t bitmap_offset, bitmap_end;

    if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
    {
        /*
         * Dirty unaligned padding: ignore it.
         *
         * Reasoning:
         * 1. If we copy it, we can't reset the corresponding bit in the
         *    dirty_bitmap, as there may be some "dirty" bytes still not
         *    copied.
         * 2. It's already dirty, so skipping it does not make mirror
         *    progress diverge.
         *
         * Note that, because of this, a guest write may not contribute to
         * mirror convergence, but that's fine, as we have a background
         * mirroring process.  If, under bad circumstances (high guest I/O
         * load), the background process starves, we will not converge
         * anyway, even if each write contributed, as the guest is not
         * guaranteed to rewrite the whole disk.
         */
        qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
        if (bytes <= qiov_offset) {
            /* nothing to do after shrink */
            return;
        }
        offset += qiov_offset;
        bytes -= qiov_offset;
    }

    if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
    {
        uint64_t tail = (offset + bytes) % job->granularity;

        if (bytes <= tail) {
            /* nothing to do after shrink */
            return;
        }
        bytes -= tail;
    }

    /*
     * Tails are either clean or shrunk, so for bitmap resetting
     * we safely align the range down.
     */
    bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
    bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
    if (bitmap_offset < bitmap_end) {
        bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                                bitmap_end - bitmap_offset);
    }

    job_progress_increase_remaining(&job->common.job, bytes);

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = blk_co_pwritev_part(job->target, offset, bytes,
                                  qiov, qiov_offset, flags);
        break;

    case MIRROR_METHOD_ZERO:
        assert(!qiov);
        ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        assert(!qiov);
        ret = blk_co_pdiscard(job->target, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret >= 0) {
        job_progress_update(&job->common.job, bytes);
    } else {
        BlockErrorAction action;

        /*
         * We failed, so we should mark the whole area dirty, aligned up.
         * Note that we don't care about shrunk tails, if any: they were
         * dirty at function start, and they must still be dirty, as we've
         * locked the region for the in-flight op.
         */
        bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
        bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
        bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                              bitmap_end - bitmap_offset);
        job->actively_synced = false;

        action = mirror_error_action(job, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT) {
            if (!job->ret) {
                job->ret = ret;
            }
        }
    }
}

static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s                  = s,
        .offset             = offset,
        .bytes              = bytes,
        .is_active_write    = true,
        .is_in_flight       = true,
        .co                 = qemu_coroutine_self(),
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}

static void coroutine_fn active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs,
    MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
    int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     !job_is_cancelled(&s->job->common.job) &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    MirrorBDSOpaque *s = bs->opaque;
    QEMUIOVector bounce_qiov;
    void *bounce_buf;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     !job_is_cancelled(&s->job->common.job) &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        /* The guest might concurrently modify the data to write; but
         * the data on source and destination must match, so we have
         * to use a bounce buffer if we are going to write to the
         * target now. */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;
    }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
                                   flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
                                    flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int64_t bytes)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
                                    NULL, 0);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       BdrvChildRole role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s->stop) {
        /*
         * If the job is to be stopped, we do not need to forward
         * anything to the real image.
         */
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
        return;
    }

    bdrv_default_perms(bs, c, role, reopen_queue,
                       perm, shared, nperm, nshared);

    if (s->is_commit) {
        /*
         * For commit jobs, we cannot take CONSISTENT_READ, because
         * that permission is unshared for everything above the base
         * node (except for filters on the base node).
         * We also have to force-share the WRITE permission, or
         * otherwise we would block ourselves at the base node (if
         * writes are blocked for a node, they are also blocked for
         * its backing file).
         * (We could also share RESIZE, because it may be needed for
         * the target if its size is less than the top node's; but
         * bdrv_default_perms_for_cow() automatically shares RESIZE
         * for backing nodes if WRITE is shared, so there is no need
         * to do it here.)
         */
        *nperm &= ~BLK_PERM_CONSISTENT_READ;
        *nshared |= BLK_PERM_WRITE;
    }
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,

    .is_filter                  = true,
};

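/*
 * Illustrative block graph while a mirror job runs (a simple chain is
 * assumed here; additional filters may sit in between):
 *
 *    guest device
 *         |
 *    mirror_top (this filter)
 *         |
 *       source  ==(job copies dirty chunks)==>  target
 */
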
static BlockJob *mirror_start_job(
                             const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             bool zero_target,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror, MirrorCopyMode copy_mode,
                             Error **errp)
{
    MirrorBlockJob *s;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_is_backing;
    uint64_t target_perms, target_shared_perms;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return NULL;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
        error_setg(errp, "Can't mirror node into itself");
        return NULL;
    }

    target_is_backing = bdrv_chain_contains(bs, target);

    /* In the case of active commit, add a dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return NULL;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }

    /* So that we can always drop this node */
    mirror_top_bs->never_freeze = true;

    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
                                          BDRV_REQ_NO_FALLBACK;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;

    bs_opaque->is_commit = target_is_backing;

    bdrv_drained_begin(bs);
    ret = bdrv_append(mirror_top_bs, bs, errp);
    bdrv_drained_end(bs);

    if (ret < 0) {
        bdrv_unref(mirror_top_bs);
        return NULL;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    bs_opaque->job = s;

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */

    target_perms = BLK_PERM_WRITE;
    target_shared_perms = BLK_PERM_WRITE_UNCHANGED;

    if (target_is_backing) {
        int64_t bs_size, target_size;
        bs_size = bdrv_getlength(bs);
        if (bs_size < 0) {
            error_setg_errno(errp, -bs_size,
                             "Could not inquire top image size");
            goto fail;
        }

        target_size = bdrv_getlength(target);
        if (target_size < 0) {
            error_setg_errno(errp, -target_size,
                             "Could not inquire base image size");
            goto fail;
        }

        if (target_size < bs_size) {
            target_perms |= BLK_PERM_RESIZE;
        }

        target_shared_perms |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
    } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
        /*
         * We may want to allow this in the future, but it would
         * require taking some extra care.
         */
        error_setg(errp, "Cannot mirror to a filter on top of a node in the "
                   "source's backing chain");
        goto fail;
    }

    s->target = blk_new(s->common.job.aio_context,
                        target_perms, target_shared_perms);
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be an NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB.  When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }
    blk_set_allow_aio_context_change(s->target, true);
    blk_set_disable_request_queuing(s->target, true);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->zero_target = zero_target;
    s->copy_mode = copy_mode;
    s->base = base;
    s->base_overlay = bdrv_find_overlay(bs, base);
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }
    if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
        bdrv_disable_dirty_bitmap(s->dirty_bitmap);
    }

    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                             BLK_PERM_CONSISTENT_READ,
                             errp);
    if (ret < 0) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter, *filtered_target;
        uint64_t iter_shared_perms;

        /*
         * The topmost node with
         * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target)
         */
        filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target));

        assert(bdrv_skip_filters(filtered_target) ==
               bdrv_skip_filters(target));

        /*
         * XXX BLK_PERM_WRITE needs to be allowed so we don't block
         * ourselves at s->base (if writes are blocked for a node, they are
         * also blocked for its backing file). The other option would be a
         * second filter driver above s->base (== target).
         */
        iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;

        for (iter = bdrv_filter_or_cow_bs(bs); iter != target;
             iter = bdrv_filter_or_cow_bs(iter))
        {
            if (iter == filtered_target) {
                /*
                 * From here on, all nodes are filters on the base.
                 * This allows us to share BLK_PERM_CONSISTENT_READ.
                 */
                iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
            }

            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     iter_shared_perms, errp);
            if (ret < 0) {
                goto fail;
            }
        }

        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
            goto fail;
        }
    }

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);

    return &s->common;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        if (s->dirty_bitmap) {
            bdrv_release_dirty_bitmap(s->dirty_bitmap);
        }
        job_early_fail(&s->common.job);
    }

    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bdrv_unref(mirror_top_bs);

    return NULL;
}

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  bool zero_target,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    GLOBAL_STATE_CODE();

    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
    mirror_start_job(job_id, bs, creation_flags, target, replaces,
                     speed, granularity, buf_size, backing_mode, zero_target,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, copy_mode, errp);
}
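
/* For illustration: mirror_start() is what the QMP mirror commands ultimately
 * reach.  A minimal invocation might look like the following (a sketch,
 * assuming node names "src" and "tgt" already exist in the graph):
 *
 *   { "execute": "blockdev-mirror",
 *     "arguments": { "job-id": "job0", "device": "src",
 *                    "target": "tgt", "sync": "full" } }
 */
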
BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                              BlockDriverState *base, int creation_flags,
                              int64_t speed, BlockdevOnError on_error,
                              const char *filter_node_name,
                              BlockCompletionFunc *cb, void *opaque,
                              bool auto_complete, Error **errp)
{
    bool base_read_only;
    BlockJob *job;

    GLOBAL_STATE_CODE();

    base_read_only = bdrv_is_read_only(base);

    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }

    job = mirror_start_job(
                     job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN, false,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
                     errp);
    if (!job) {
        goto error_restore_flags;
    }

    return job;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;
}