/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
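
/*
 * Sizing note (summary): up to MAX_IN_FLIGHT concurrent copy operations of
 * at most MAX_IO_BYTES each give the 16 * 1 MiB = 16 MiB default buffer
 * above; with the usual default bitmap granularity of 64 KiB, that buffer
 * is carved into 256 chunks (see mirror_free_init() below).
 */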

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;
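
/*
 * Terminology note: throughout this file a "chunk" is s->granularity bytes;
 * bit indices in in_flight_bitmap and cow_bitmap are byte offsets divided
 * by the granularity, so one bit tracks exactly one chunk.
 */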

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    CoQueue waiting_requests;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                qemu_co_queue_wait(&op->waiting_requests, NULL);
                break;
            }
        }
    }
}
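
/*
 * Illustrative example: with a granularity of 64 KiB, a request covering
 * [0x30000, 0x50000) occupies chunks 3 and 4. It conflicts with any listed
 * op whose chunk range intersects [3, 5); the caller is then parked on that
 * op's waiting_requests queue and re-checks the bitmap once woken up.
 */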

static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}
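
/*
 * Worked example (illustrative): granularity = 64 KiB, target cluster size
 * = 128 KiB, request [64 KiB, 128 KiB) with both surrounding chunks still
 * uncopied. The request is widened to [0 KiB, 128 KiB): *offset moves back
 * by 64 KiB, the tail stays put, and the returned tail adjustment is
 * 0 + 128K - (64K + 64K) = 0, which mirror_co_read() adds to
 * *op->bytes_handled.
 */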

static inline void coroutine_fn
mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /* Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function. Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on. */
        if (!op->is_pseudo_op && op->is_active_write == active) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    /* Only non-active operations use up in-flight slots */
    mirror_wait_for_any_operation(s, false);
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment.  This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_read()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}
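
/*
 * Typical call pattern (sketch, matching mirror_iteration() below): callers
 * pass a granularity-aligned range and must advance by the value actually
 * returned, which may be smaller than requested due to buffer and iovec
 * limits:
 *
 *     io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
 *     offset += io_bytes;
 *     nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
 */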

static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it. mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
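
/*
 * Example: the default 16 MiB buffer with 64 KiB granularity yields 256
 * free chunks here. While a chunk sits on the free list, its first bytes
 * hold the QSIMPLEQ link; the payload is only meaningful while the chunk
 * is attached to an op's qiov.
 */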

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /*
     * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     */
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(bjob->blk);
    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (s->zero_target) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap.  */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, false, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}
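
/*
 * Summary of the loop above: for sync=full, s->base is NULL, so any range
 * allocated anywhere in the backing chain is marked dirty; for sync=top,
 * s->base is the source's backing file, so only ranges allocated above it
 * are. Unallocated ranges stay clean and are never copied.
 */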

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, false,
                               PREALLOC_MODE_OFF, NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->synced = true;
        s->actively_synced = true;
        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        s->common.job.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        /* Do not start passive operations while there are active
         * writes in progress */
        while (s->in_active_write_counter) {
            mirror_wait_for_any_operation(s, true);
        }

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                s->synced = true;
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_is_cancelled(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.job.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;

        if (s->synced && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        if (job_is_cancelled(&s->common.job) &&
            (!s->synced || s->common.job.force_cancel))
        {
            break;
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
               job_is_cancelled(&s->common.job)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}

static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    job_enter(job);
}

static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job isn't paused nor cancelled, we can't be sure that it won't
     * issue more requests. We make an exception if we've reached this point
     * from one of our own drain sections, to avoid a deadlock waiting for
     * ourselves.
     */
    if (!s->common.job.paused && !s->common.job.cancelled && !s->in_drain) {
        return true;
    }

    return !!s->in_flight;
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
};

static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
{
    int ret;
    size_t qiov_offset = 0;
    int64_t bitmap_offset, bitmap_end;

    if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
    {
        /*
         * Dirty unaligned padding: ignore it.
         *
         * Reasoning:
         * 1. If we copy it, we can't reset the corresponding bit in the
         *    dirty_bitmap, as there may be some "dirty" bytes still not
         *    copied.
         * 2. It's already dirty, so skipping it does not diverge mirror
         *    progress.
         *
         * Note that, because of this, a guest write may contribute nothing
         * to mirror convergence. That is acceptable, since the background
         * mirroring process still runs; if it starves under heavy guest I/O
         * load, we would not converge anyway, even if every write
         * contributed, as the guest is not guaranteed to rewrite the whole
         * disk.
         */
        qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
        if (bytes <= qiov_offset) {
            /* nothing to do after shrink */
            return;
        }
        offset += qiov_offset;
        bytes -= qiov_offset;
    }

    if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
        bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
    {
        uint64_t tail = (offset + bytes) % job->granularity;

        if (bytes <= tail) {
            /* nothing to do after shrink */
            return;
        }
        bytes -= tail;
    }

    /*
     * Tails are either clean or shrunk, so for bitmap resetting
     * we safely align the range down.
     */
    bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
    bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
    if (bitmap_offset < bitmap_end) {
        bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                                bitmap_end - bitmap_offset);
    }

    job_progress_increase_remaining(&job->common.job, bytes);

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = blk_co_pwritev_part(job->target, offset, bytes,
                                  qiov, qiov_offset, flags);
        break;

    case MIRROR_METHOD_ZERO:
        assert(!qiov);
        ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        assert(!qiov);
        ret = blk_co_pdiscard(job->target, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret >= 0) {
        job_progress_update(&job->common.job, bytes);
    } else {
        BlockErrorAction action;

        /*
         * We failed, so we should mark the whole area dirty, aligned up.
         * Note that we don't care about shrunk tails if any: they were dirty
         * at function start, and they must still be dirty, as we've locked
         * the region for the in-flight op.
         */
        bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
        bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
        bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
                              bitmap_end - bitmap_offset);
        job->actively_synced = false;

        action = mirror_error_action(job, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT) {
            if (!job->ret) {
                job->ret = ret;
            }
        }
    }
}
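
/*
 * Illustrative example: granularity = 64 KiB, guest write to
 * [0x1f000, 0x38000). If the chunk containing 0x1f000 is already dirty,
 * the unaligned head is skipped (qiov_offset = 0x1000); if the chunk
 * containing 0x37fff is dirty, the tail past 0x30000 is shrunk off.
 * The surviving [0x20000, 0x30000) is written to the target, and only
 * that whole chunk is reset in the dirty bitmap.
 */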

static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s                  = s,
        .offset             = offset,
        .bytes              = bytes,
        .is_active_write    = true,
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}

static void coroutine_fn active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs,
    MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
    int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    MirrorBDSOpaque *s = bs->opaque;
    QEMUIOVector bounce_qiov;
    void *bounce_buf;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        /* The guest might concurrently modify the data to write; but
         * the data on source and destination must match, so we have
         * to use a bounce buffer if we are going to write to the
         * target now. */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;
    }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
                                   flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
                                    flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
                                    NULL, 0);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s->stop) {
        /*
         * If the job is to be stopped, we do not need to forward
         * anything to the real image.
         */
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
        return;
    }

    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_block_status       = bdrv_co_block_status_from_backing,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};
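
/*
 * Graph shape while the job runs (sketch): the filter is inserted above the
 * source by bdrv_append() below, so former parents of the source now see
 * mirror_top, whose backing child is the source. The job's BlockBackend
 * points at mirror_top; the target is reached through s->target only.
 *
 *   parents -> mirror_top -> source -> (backing chain ...)
 *   job BB  -> mirror_top            target BB -> target
 */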

static BlockJob *mirror_start_job(
                             const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             bool zero_target,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror, MirrorCopyMode copy_mode,
                             Error **errp)
{
    MirrorBlockJob *s;
    MirrorBDSOpaque *bs_opaque;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert(is_power_of_2(granularity));

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return NULL;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    if (bs == target) {
        error_setg(errp, "Can't mirror node into itself");
        return NULL;
    }

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return NULL;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }

    /* So that we can always drop this node */
    mirror_top_bs->never_freeze = true;

    mirror_top_bs->total_sectors = bs->total_sectors;
    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
                                          BDRV_REQ_NO_FALLBACK;
    bs_opaque = g_new0(MirrorBDSOpaque, 1);
    mirror_top_bs->opaque = bs_opaque;

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() succeeds even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return NULL;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    bs_opaque->job = s;

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(s->common.job.aio_context,
                        BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be a NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB.  When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }
    blk_set_allow_aio_context_change(s->target, true);
    blk_set_disable_request_queuing(s->target, true);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->zero_target = zero_target;
    s->copy_mode = copy_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }
    if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
        bdrv_disable_dirty_bitmap(s->dirty_bitmap);
    }

    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                             BLK_PERM_CONSISTENT_READ,
                             errp);
    if (ret < 0) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other options would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }

        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
            goto fail;
        }
    }

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);

    return &s->common;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        if (s->dirty_bitmap) {
            bdrv_release_dirty_bitmap(s->dirty_bitmap);
        }
        job_early_fail(&s->common.job);
    }

    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);

    return NULL;
}

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  bool zero_target,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, creation_flags, target, replaces,
                     speed, granularity, buf_size, backing_mode, zero_target,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, copy_mode, errp);
}
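
/*
 * For orientation: mirror_start() is the entry point behind the QMP
 * 'drive-mirror' and 'blockdev-mirror' commands. A minimal illustrative
 * invocation (argument names as in qapi/block-core.json) might be:
 *
 *   { "execute": "blockdev-mirror",
 *     "arguments": { "job-id": "mirror0", "device": "source-node",
 *                    "target": "target-node", "sync": "full" } }
 */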

BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                              BlockDriverState *base, int creation_flags,
                              int64_t speed, BlockdevOnError on_error,
                              const char *filter_node_name,
                              BlockCompletionFunc *cb, void *opaque,
                              bool auto_complete, Error **errp)
{
    bool base_read_only;
    Error *local_err = NULL;
    BlockJob *ret;

    base_read_only = bdrv_is_read_only(base);

    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }

    ret = mirror_start_job(
                     job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN, false,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
                     &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return ret;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;
}
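
/*
 * Note (summary): commit_active_start() reuses the mirror machinery for
 * committing the active layer; the "mirror target" here is the backing
 * image being committed into, which is why granularity and buf_size are
 * left at their defaults and the backing chain is left in place
 * (MIRROR_LEAVE_BACKING_CHAIN).
 */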