block/mirror.c
/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
14 #include "qemu/osdep.h"
15 #include "qemu/cutils.h"
16 #include "qemu/coroutine.h"
17 #include "qemu/range.h"
18 #include "trace.h"
19 #include "block/blockjob_int.h"
20 #include "block/block_int.h"
21 #include "block/dirty-bitmap.h"
22 #include "sysemu/block-backend.h"
23 #include "qapi/error.h"
24 #include "qemu/ratelimit.h"
25 #include "qemu/bitmap.h"
26 #include "qemu/memalign.h"
28 #define MAX_IN_FLIGHT 16
29 #define MAX_IO_BYTES (1 << 20) /* 1 Mb */
30 #define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;
typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;
    BlockDriverState *base_overlay;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    /*
     * To be accessed with atomics. Written only under the BQL (required by the
     * current implementation of mirror_change()).
     */
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    /*
     * To be accessed with atomics.
     *
     * Set when the target is synced (dirty bitmap is clean, nothing in flight)
     * and the job is running in active mode.
     */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    unsigned in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    int64_t active_write_bytes_in_flight;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;
typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
    bool is_commit;
} MirrorBDSOpaque;
struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    bool is_in_flight;
    CoQueue waiting_requests;
    Coroutine *co;
    MirrorOp *waiting_for_op;

    QTAILQ_ENTRY(MirrorOp) next;
};
typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;
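/*
 * Map a failed source read or target write to the action configured via
 * on-source-error/on-target-error.  Any error means the target can no
 * longer be considered actively in sync.
 */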
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    qatomic_set(&s->actively_synced, false);
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}
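/*
 * Wait until no other in-flight operation overlaps the given byte range.
 * @self may be NULL when the caller has not created a MirrorOp of its own
 * (e.g. the background iteration waiting for the start of a dirty area).
 */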
143 static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
144 MirrorBlockJob *s,
145 uint64_t offset,
146 uint64_t bytes)
148 uint64_t self_start_chunk = offset / s->granularity;
149 uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
150 uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;
152 while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
153 self_start_chunk) < self_end_chunk &&
154 s->ret >= 0)
156 MirrorOp *op;
158 QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
159 uint64_t op_start_chunk = op->offset / s->granularity;
160 uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
161 s->granularity) -
162 op_start_chunk;
164 if (op == self) {
165 continue;
168 if (ranges_overlap(self_start_chunk, self_nb_chunks,
169 op_start_chunk, op_nb_chunks))
171 if (self) {
173 * If the operation is already (indirectly) waiting for us,
174 * or will wait for us as soon as it wakes up, then just go
175 * on (instead of producing a deadlock in the former case).
177 if (op->waiting_for_op) {
178 continue;
181 self->waiting_for_op = op;
184 qemu_co_queue_wait(&op->waiting_requests, NULL);
186 if (self) {
187 self->waiting_for_op = NULL;
190 break;
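/*
 * Tear down a finished operation: return its buffer chunks to the free list,
 * clear its bits in the in-flight bitmap, update job progress on success and
 * wake up any requests waiting on it.
 */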
196 static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
198 MirrorBlockJob *s = op->s;
199 struct iovec *iov;
200 int64_t chunk_num;
201 int i, nb_chunks;
203 trace_mirror_iteration_done(s, op->offset, op->bytes, ret);
205 s->in_flight--;
206 s->bytes_in_flight -= op->bytes;
207 iov = op->qiov.iov;
208 for (i = 0; i < op->qiov.niov; i++) {
209 MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
210 QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
211 s->buf_free_count++;
214 chunk_num = op->offset / s->granularity;
215 nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);
217 bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
218 QTAILQ_REMOVE(&s->ops_in_flight, op, next);
219 if (ret >= 0) {
220 if (s->cow_bitmap) {
221 bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
223 if (!s->initial_zeroing_ongoing) {
224 job_progress_update(&s->common.job, op->bytes);
227 qemu_iovec_destroy(&op->qiov);
229 qemu_co_queue_restart_all(&op->waiting_requests);
230 g_free(op);
233 static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
235 MirrorBlockJob *s = op->s;
237 if (ret < 0) {
238 BlockErrorAction action;
240 bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
241 action = mirror_error_action(s, false, -ret);
242 if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
243 s->ret = ret;
247 mirror_iteration_done(op, ret);
250 static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
252 MirrorBlockJob *s = op->s;
254 if (ret < 0) {
255 BlockErrorAction action;
257 bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
258 action = mirror_error_action(s, true, -ret);
259 if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
260 s->ret = ret;
263 mirror_iteration_done(op, ret);
264 return;
267 ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
268 mirror_write_complete(op, ret);
/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}
279 /* Round offset and/or bytes to target cluster if COW is needed, and
280 * return the offset of the adjusted tail against original. */
281 static int coroutine_fn mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
282 uint64_t *bytes)
284 bool need_cow;
285 int ret = 0;
286 int64_t align_offset = *offset;
287 int64_t align_bytes = *bytes;
288 int max_bytes = s->granularity * s->max_iov;
290 need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
291 need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
292 s->cow_bitmap);
293 if (need_cow) {
294 bdrv_round_to_subclusters(blk_bs(s->target), *offset, *bytes,
295 &align_offset, &align_bytes);
298 if (align_bytes > max_bytes) {
299 align_bytes = max_bytes;
300 if (need_cow) {
301 align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
304 /* Clipping may result in align_bytes unaligned to chunk boundary, but
305 * that doesn't matter because it's already the end of source image. */
306 align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);
308 ret = align_offset + align_bytes - (*offset + *bytes);
309 *offset = align_offset;
310 *bytes = align_bytes;
311 assert(ret >= 0);
312 return ret;
315 static inline void coroutine_fn
316 mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
318 MirrorOp *op;
320 QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
322 * Do not wait on pseudo ops, because it may in turn wait on
323 * some other operation to start, which may in fact be the
324 * caller of this function. Since there is only one pseudo op
325 * at any given time, we will always find some real operation
326 * to wait on.
327 * Also, do not wait on active operations, because they do not
328 * use up in-flight slots.
330 if (!op->is_pseudo_op && op->is_in_flight && !op->is_active_write) {
331 qemu_co_queue_wait(&op->waiting_requests, NULL);
332 return;
335 abort();
338 /* Perform a mirror copy operation.
340 * *op->bytes_handled is set to the number of bytes copied after and
341 * including offset, excluding any bytes copied prior to offset due
342 * to alignment. This will be op->bytes if no alignment is necessary,
343 * or (new_end - op->offset) if the tail is rounded up or down due to
344 * alignment or buffer limit.
346 static void coroutine_fn mirror_co_read(void *opaque)
348 MirrorOp *op = opaque;
349 MirrorBlockJob *s = op->s;
350 int nb_chunks;
351 uint64_t ret;
352 uint64_t max_bytes;
354 max_bytes = s->granularity * s->max_iov;
356 /* We can only handle as much as buf_size at a time. */
357 op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
358 assert(op->bytes);
359 assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
360 *op->bytes_handled = op->bytes;
362 if (s->cow_bitmap) {
363 *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
365 /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
366 assert(*op->bytes_handled <= UINT_MAX);
367 assert(op->bytes <= s->buf_size);
368 /* The offset is granularity-aligned because:
369 * 1) Caller passes in aligned values;
370 * 2) mirror_cow_align is used only when target cluster is larger. */
371 assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
372 /* The range is sector-aligned, since bdrv_getlength() rounds up. */
373 assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
374 nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);
376 while (s->buf_free_count < nb_chunks) {
377 trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
378 mirror_wait_for_free_in_flight_slot(s);
381 /* Now make a QEMUIOVector taking enough granularity-sized chunks
382 * from s->buf_free.
384 qemu_iovec_init(&op->qiov, nb_chunks);
385 while (nb_chunks-- > 0) {
386 MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
387 size_t remaining = op->bytes - op->qiov.size;
389 QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
390 s->buf_free_count--;
391 qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
394 /* Copy the dirty cluster. */
395 s->in_flight++;
396 s->bytes_in_flight += op->bytes;
397 op->is_in_flight = true;
398 trace_mirror_one_iteration(s, op->offset, op->bytes);
400 WITH_GRAPH_RDLOCK_GUARD() {
401 ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
402 &op->qiov, 0);
404 mirror_read_complete(op, ret);
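/* Coroutine that zeroes the given range on the target, unmapping if allowed */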
static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}
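/*
 * Start a copy, zero or discard operation in its own coroutine and return the
 * number of bytes (counted from @offset) that the operation will handle.
 */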
436 static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
437 unsigned bytes, MirrorMethod mirror_method)
439 MirrorOp *op;
440 Coroutine *co;
441 int64_t bytes_handled = -1;
443 op = g_new(MirrorOp, 1);
444 *op = (MirrorOp){
445 .s = s,
446 .offset = offset,
447 .bytes = bytes,
448 .bytes_handled = &bytes_handled,
450 qemu_co_queue_init(&op->waiting_requests);
452 switch (mirror_method) {
453 case MIRROR_METHOD_COPY:
454 co = qemu_coroutine_create(mirror_co_read, op);
455 break;
456 case MIRROR_METHOD_ZERO:
457 co = qemu_coroutine_create(mirror_co_zero, op);
458 break;
459 case MIRROR_METHOD_DISCARD:
460 co = qemu_coroutine_create(mirror_co_discard, op);
461 break;
462 default:
463 abort();
465 op->co = co;
467 QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
468 qemu_coroutine_enter(co);
469 /* At this point, ownership of op has been moved to the coroutine
470 * and the object may already be freed */
472 /* Assert that this value has been set */
473 assert(bytes_handled >= 0);
475 /* Same assertion as in mirror_co_read() (and for mirror_co_read()
476 * and mirror_co_discard(), bytes_handled == op->bytes, which
477 * is the @bytes parameter given to this function) */
478 assert(bytes_handled <= UINT_MAX);
479 return bytes_handled;
482 static void coroutine_fn GRAPH_RDLOCK mirror_iteration(MirrorBlockJob *s)
484 BlockDriverState *source = s->mirror_top_bs->backing->bs;
485 MirrorOp *pseudo_op;
486 int64_t offset;
487 /* At least the first dirty chunk is mirrored in one iteration. */
488 int nb_chunks = 1;
489 bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
490 int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);
492 bdrv_dirty_bitmap_lock(s->dirty_bitmap);
493 offset = bdrv_dirty_iter_next(s->dbi);
494 if (offset < 0) {
495 bdrv_set_dirty_iter(s->dbi, 0);
496 offset = bdrv_dirty_iter_next(s->dbi);
497 trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
498 assert(offset >= 0);
500 bdrv_dirty_bitmap_unlock(s->dirty_bitmap);
503 * Wait for concurrent requests to @offset. The next loop will limit the
504 * copied area based on in_flight_bitmap so we only copy an area that does
505 * not overlap with concurrent in-flight requests. Still, we would like to
506 * copy something, so wait until there are at least no more requests to the
507 * very beginning of the area.
509 mirror_wait_on_conflicts(NULL, s, offset, 1);
511 job_pause_point(&s->common.job);
513 /* Find the number of consecutive dirty chunks following the first dirty
514 * one, and wait for in flight requests in them. */
515 bdrv_dirty_bitmap_lock(s->dirty_bitmap);
516 while (nb_chunks * s->granularity < s->buf_size) {
517 int64_t next_dirty;
518 int64_t next_offset = offset + nb_chunks * s->granularity;
519 int64_t next_chunk = next_offset / s->granularity;
520 if (next_offset >= s->bdev_length ||
521 !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
522 break;
524 if (test_bit(next_chunk, s->in_flight_bitmap)) {
525 break;
528 next_dirty = bdrv_dirty_iter_next(s->dbi);
529 if (next_dirty > next_offset || next_dirty < 0) {
530 /* The bitmap iterator's cache is stale, refresh it */
531 bdrv_set_dirty_iter(s->dbi, next_offset);
532 next_dirty = bdrv_dirty_iter_next(s->dbi);
534 assert(next_dirty == next_offset);
535 nb_chunks++;
538 /* Clear dirty bits before querying the block status, because
539 * calling bdrv_block_status_above could yield - if some blocks are
540 * marked dirty in this window, we need to know.
542 bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
543 nb_chunks * s->granularity);
544 bdrv_dirty_bitmap_unlock(s->dirty_bitmap);
546 /* Before claiming an area in the in-flight bitmap, we have to
547 * create a MirrorOp for it so that conflicting requests can wait
548 * for it. mirror_perform() will create the real MirrorOps later,
549 * for now we just create a pseudo operation that will wake up all
550 * conflicting requests once all real operations have been
551 * launched. */
552 pseudo_op = g_new(MirrorOp, 1);
553 *pseudo_op = (MirrorOp){
554 .offset = offset,
555 .bytes = nb_chunks * s->granularity,
556 .is_pseudo_op = true,
558 qemu_co_queue_init(&pseudo_op->waiting_requests);
559 QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);
561 bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
562 while (nb_chunks > 0 && offset < s->bdev_length) {
563 int ret;
564 int64_t io_bytes;
565 int64_t io_bytes_acct;
566 MirrorMethod mirror_method = MIRROR_METHOD_COPY;
568 assert(!(offset % s->granularity));
569 WITH_GRAPH_RDLOCK_GUARD() {
570 ret = bdrv_co_block_status_above(source, NULL, offset,
571 nb_chunks * s->granularity,
572 &io_bytes, NULL, NULL);
574 if (ret < 0) {
575 io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
576 } else if (ret & BDRV_BLOCK_DATA) {
577 io_bytes = MIN(io_bytes, max_io_bytes);
580 io_bytes -= io_bytes % s->granularity;
581 if (io_bytes < s->granularity) {
582 io_bytes = s->granularity;
583 } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
584 int64_t target_offset;
585 int64_t target_bytes;
586 WITH_GRAPH_RDLOCK_GUARD() {
587 bdrv_round_to_subclusters(blk_bs(s->target), offset, io_bytes,
588 &target_offset, &target_bytes);
590 if (target_offset == offset &&
591 target_bytes == io_bytes) {
592 mirror_method = ret & BDRV_BLOCK_ZERO ?
593 MIRROR_METHOD_ZERO :
594 MIRROR_METHOD_DISCARD;
598 while (s->in_flight >= MAX_IN_FLIGHT) {
599 trace_mirror_yield_in_flight(s, offset, s->in_flight);
600 mirror_wait_for_free_in_flight_slot(s);
603 if (s->ret < 0) {
604 ret = 0;
605 goto fail;
608 io_bytes = mirror_clip_bytes(s, offset, io_bytes);
609 io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
610 if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
611 io_bytes_acct = 0;
612 } else {
613 io_bytes_acct = io_bytes;
615 assert(io_bytes);
616 offset += io_bytes;
617 nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
618 block_job_ratelimit_processed_bytes(&s->common, io_bytes_acct);
621 fail:
622 QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
623 qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
624 g_free(pseudo_op);
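/*
 * Carve the pre-allocated transfer buffer s->buf into granularity-sized
 * MirrorBuffer chunks and put them all on the free list.
 */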
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}
656 * mirror_exit_common: handle both abort() and prepare() cases.
657 * for .prepare, returns 0 on success and -errno on failure.
658 * for .abort cases, denoted by abort = true, MUST return 0.
660 static int mirror_exit_common(Job *job)
662 MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
663 BlockJob *bjob = &s->common;
664 MirrorBDSOpaque *bs_opaque;
665 BlockDriverState *src;
666 BlockDriverState *target_bs;
667 BlockDriverState *mirror_top_bs;
668 Error *local_err = NULL;
669 bool abort = job->ret < 0;
670 int ret = 0;
672 GLOBAL_STATE_CODE();
674 if (s->prepared) {
675 return 0;
677 s->prepared = true;
679 bdrv_graph_rdlock_main_loop();
681 mirror_top_bs = s->mirror_top_bs;
682 bs_opaque = mirror_top_bs->opaque;
683 src = mirror_top_bs->backing->bs;
684 target_bs = blk_bs(s->target);
686 if (bdrv_chain_contains(src, target_bs)) {
687 bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
690 bdrv_release_dirty_bitmap(s->dirty_bitmap);
692 /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
693 * before we can call bdrv_drained_end */
694 bdrv_ref(src);
695 bdrv_ref(mirror_top_bs);
696 bdrv_ref(target_bs);
698 bdrv_graph_rdunlock_main_loop();
701 * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
702 * inserting target_bs at s->to_replace, where we might not be able to get
703 * these permissions.
705 blk_unref(s->target);
706 s->target = NULL;
708 /* We don't access the source any more. Dropping any WRITE/RESIZE is
709 * required before it could become a backing file of target_bs. Not having
710 * these permissions any more means that we can't allow any new requests on
711 * mirror_top_bs from now on, so keep it drained. */
712 bdrv_drained_begin(mirror_top_bs);
713 bdrv_drained_begin(target_bs);
714 bs_opaque->stop = true;
716 bdrv_graph_rdlock_main_loop();
717 bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
718 &error_abort);
720 if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
721 BlockDriverState *backing = s->is_none_mode ? src : s->base;
722 BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs);
724 if (bdrv_cow_bs(unfiltered_target) != backing) {
725 bdrv_set_backing_hd(unfiltered_target, backing, &local_err);
726 if (local_err) {
727 error_report_err(local_err);
728 local_err = NULL;
729 ret = -EPERM;
732 } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
733 assert(!bdrv_backing_chain_next(target_bs));
734 ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL,
735 "backing", &local_err);
736 if (ret < 0) {
737 error_report_err(local_err);
738 local_err = NULL;
741 bdrv_graph_rdunlock_main_loop();
743 if (s->should_complete && !abort) {
744 BlockDriverState *to_replace = s->to_replace ?: src;
745 bool ro = bdrv_is_read_only(to_replace);
747 if (ro != bdrv_is_read_only(target_bs)) {
748 bdrv_reopen_set_read_only(target_bs, ro, NULL);
751 /* The mirror job has no requests in flight any more, but we need to
752 * drain potential other users of the BDS before changing the graph. */
753 assert(s->in_drain);
754 bdrv_drained_begin(to_replace);
756 * Cannot use check_to_replace_node() here, because that would
757 * check for an op blocker on @to_replace, and we have our own
758 * there.
760 bdrv_graph_wrlock();
761 if (bdrv_recurse_can_replace(src, to_replace)) {
762 bdrv_replace_node(to_replace, target_bs, &local_err);
763 } else {
764 error_setg(&local_err, "Can no longer replace '%s' by '%s', "
765 "because it can no longer be guaranteed that doing so "
766 "would not lead to an abrupt change of visible data",
767 to_replace->node_name, target_bs->node_name);
769 bdrv_graph_wrunlock();
770 bdrv_drained_end(to_replace);
771 if (local_err) {
772 error_report_err(local_err);
773 ret = -EPERM;
776 if (s->to_replace) {
777 bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
778 error_free(s->replace_blocker);
779 bdrv_unref(s->to_replace);
781 g_free(s->replaces);
784 * Remove the mirror filter driver from the graph. Before this, get rid of
785 * the blockers on the intermediate nodes so that the resulting state is
786 * valid.
788 block_job_remove_all_bdrv(bjob);
789 bdrv_graph_wrlock();
790 bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
791 bdrv_graph_wrunlock();
793 bdrv_drained_end(target_bs);
794 bdrv_unref(target_bs);
796 bs_opaque->job = NULL;
798 bdrv_drained_end(src);
799 bdrv_drained_end(mirror_top_bs);
800 s->in_drain = false;
801 bdrv_unref(mirror_top_bs);
802 bdrv_unref(src);
804 return ret;
static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}
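/*
 * Give up the remainder of the current job slice once it has been used up;
 * otherwise just honour a pending pause request.
 */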
static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}
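/*
 * Prepare the background copying phase: if requested, zero the target (or
 * mark the whole device dirty when zeroes cannot be written efficiently),
 * then mark every area allocated above the base as dirty.
 */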
830 static int coroutine_fn GRAPH_UNLOCKED mirror_dirty_init(MirrorBlockJob *s)
832 int64_t offset;
833 BlockDriverState *bs;
834 BlockDriverState *target_bs = blk_bs(s->target);
835 int ret;
836 int64_t count;
838 bdrv_graph_co_rdlock();
839 bs = s->mirror_top_bs->backing->bs;
840 bdrv_graph_co_rdunlock();
842 if (s->zero_target) {
843 if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
844 bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
845 return 0;
848 s->initial_zeroing_ongoing = true;
849 for (offset = 0; offset < s->bdev_length; ) {
850 int bytes = MIN(s->bdev_length - offset,
851 QEMU_ALIGN_DOWN(INT_MAX, s->granularity));
853 mirror_throttle(s);
855 if (job_is_cancelled(&s->common.job)) {
856 s->initial_zeroing_ongoing = false;
857 return 0;
860 if (s->in_flight >= MAX_IN_FLIGHT) {
861 trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
862 s->in_flight);
863 mirror_wait_for_free_in_flight_slot(s);
864 continue;
867 mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
868 offset += bytes;
871 mirror_wait_for_all_io(s);
872 s->initial_zeroing_ongoing = false;
875 /* First part, loop on the sectors and initialize the dirty bitmap. */
876 for (offset = 0; offset < s->bdev_length; ) {
877 /* Just to make sure we are not exceeding int limit. */
878 int bytes = MIN(s->bdev_length - offset,
879 QEMU_ALIGN_DOWN(INT_MAX, s->granularity));
881 mirror_throttle(s);
883 if (job_is_cancelled(&s->common.job)) {
884 return 0;
887 WITH_GRAPH_RDLOCK_GUARD() {
888 ret = bdrv_co_is_allocated_above(bs, s->base_overlay, true, offset,
889 bytes, &count);
891 if (ret < 0) {
892 return ret;
895 assert(count);
896 if (ret > 0) {
897 bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
899 offset += count;
901 return 0;
/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int coroutine_fn mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_co_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}
918 static int coroutine_fn mirror_run(Job *job, Error **errp)
920 MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
921 BlockDriverState *bs;
922 MirrorBDSOpaque *mirror_top_opaque = s->mirror_top_bs->opaque;
923 BlockDriverState *target_bs = blk_bs(s->target);
924 bool need_drain = true;
925 BlockDeviceIoStatus iostatus;
926 int64_t length;
927 int64_t target_length;
928 BlockDriverInfo bdi;
929 char backing_filename[2]; /* we only need 2 characters because we are only
930 checking for a NULL string */
931 int ret = 0;
933 bdrv_graph_co_rdlock();
934 bs = bdrv_filter_bs(s->mirror_top_bs);
935 bdrv_graph_co_rdunlock();
937 if (job_is_cancelled(&s->common.job)) {
938 goto immediate_exit;
941 bdrv_graph_co_rdlock();
942 s->bdev_length = bdrv_co_getlength(bs);
943 bdrv_graph_co_rdunlock();
945 if (s->bdev_length < 0) {
946 ret = s->bdev_length;
947 goto immediate_exit;
950 target_length = blk_co_getlength(s->target);
951 if (target_length < 0) {
952 ret = target_length;
953 goto immediate_exit;
956 /* Active commit must resize the base image if its size differs from the
957 * active layer. */
958 if (s->base == blk_bs(s->target)) {
959 if (s->bdev_length > target_length) {
960 ret = blk_co_truncate(s->target, s->bdev_length, false,
961 PREALLOC_MODE_OFF, 0, NULL);
962 if (ret < 0) {
963 goto immediate_exit;
966 } else if (s->bdev_length != target_length) {
967 error_setg(errp, "Source and target image have different sizes");
968 ret = -EINVAL;
969 goto immediate_exit;
972 if (s->bdev_length == 0) {
973 /* Transition to the READY state and wait for complete. */
974 job_transition_to_ready(&s->common.job);
975 qatomic_set(&s->actively_synced, true);
976 while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
977 job_yield(&s->common.job);
979 goto immediate_exit;
982 length = DIV_ROUND_UP(s->bdev_length, s->granularity);
983 s->in_flight_bitmap = bitmap_new(length);
985 /* If we have no backing file yet in the destination, we cannot let
986 * the destination do COW. Instead, we copy sectors around the
987 * dirty data if needed. We need a bitmap to do that.
989 bdrv_get_backing_filename(target_bs, backing_filename,
990 sizeof(backing_filename));
991 bdrv_graph_co_rdlock();
992 if (!bdrv_co_get_info(target_bs, &bdi) && bdi.cluster_size) {
993 s->target_cluster_size = bdi.cluster_size;
994 } else {
995 s->target_cluster_size = BDRV_SECTOR_SIZE;
997 if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) &&
998 s->granularity < s->target_cluster_size) {
999 s->buf_size = MAX(s->buf_size, s->target_cluster_size);
1000 s->cow_bitmap = bitmap_new(length);
1002 s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);
1003 bdrv_graph_co_rdunlock();
1005 s->buf = qemu_try_blockalign(bs, s->buf_size);
1006 if (s->buf == NULL) {
1007 ret = -ENOMEM;
1008 goto immediate_exit;
1011 mirror_free_init(s);
1013 s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1014 if (!s->is_none_mode) {
1015 ret = mirror_dirty_init(s);
1016 if (ret < 0 || job_is_cancelled(&s->common.job)) {
1017 goto immediate_exit;
1022 * Only now the job is fully initialised and mirror_top_bs should start
1023 * accessing it.
1025 mirror_top_opaque->job = s;
1027 assert(!s->dbi);
1028 s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
1029 for (;;) {
1030 int64_t cnt, delta;
1031 bool should_complete;
1033 if (s->ret < 0) {
1034 ret = s->ret;
1035 goto immediate_exit;
1038 job_pause_point(&s->common.job);
1040 if (job_is_cancelled(&s->common.job)) {
1041 ret = 0;
1042 goto immediate_exit;
1045 cnt = bdrv_get_dirty_count(s->dirty_bitmap);
1046 /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
1047 * the number of bytes currently being processed; together those are
1048 * the current remaining operation length */
1049 job_progress_set_remaining(&s->common.job,
1050 s->bytes_in_flight + cnt +
1051 s->active_write_bytes_in_flight);
1053 /* Note that even when no rate limit is applied we need to yield
1054 * periodically with no pending I/O so that bdrv_drain_all() returns.
 * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
1056 * an error, or when the source is clean, whichever comes first. */
1057 delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
1058 WITH_JOB_LOCK_GUARD() {
1059 iostatus = s->common.iostatus;
1061 if (delta < BLOCK_JOB_SLICE_TIME &&
1062 iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
1063 if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
1064 (cnt == 0 && s->in_flight > 0)) {
1065 trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
1066 mirror_wait_for_free_in_flight_slot(s);
1067 continue;
1068 } else if (cnt != 0) {
1069 bdrv_graph_co_rdlock();
1070 mirror_iteration(s);
1071 bdrv_graph_co_rdunlock();
1075 should_complete = false;
1076 if (s->in_flight == 0 && cnt == 0) {
1077 trace_mirror_before_flush(s);
1078 if (!job_is_ready(&s->common.job)) {
1079 if (mirror_flush(s) < 0) {
1080 /* Go check s->ret. */
1081 continue;
1083 /* We're out of the streaming phase. From now on, if the job
1084 * is cancelled we will actually complete all pending I/O and
1085 * report completion. This way, block-job-cancel will leave
1086 * the target in a consistent state.
1088 job_transition_to_ready(&s->common.job);
1090 if (qatomic_read(&s->copy_mode) != MIRROR_COPY_MODE_BACKGROUND) {
1091 qatomic_set(&s->actively_synced, true);
1094 should_complete = s->should_complete ||
1095 job_cancel_requested(&s->common.job);
1096 cnt = bdrv_get_dirty_count(s->dirty_bitmap);
1099 if (cnt == 0 && should_complete) {
1100 /* The dirty bitmap is not updated while operations are pending.
1101 * If we're about to exit, wait for pending operations before
1102 * calling bdrv_get_dirty_count(bs), or we may exit while the
1103 * source has dirty data to copy!
1105 * Note that I/O can be submitted by the guest while
1106 * mirror_populate runs, so pause it now. Before deciding
1107 * whether to switch to target check one last time if I/O has
1108 * come in the meanwhile, and if not flush the data to disk.
1110 trace_mirror_before_drain(s, cnt);
1112 s->in_drain = true;
1113 bdrv_drained_begin(bs);
1115 /* Must be zero because we are drained */
1116 assert(s->in_active_write_counter == 0);
1118 cnt = bdrv_get_dirty_count(s->dirty_bitmap);
1119 if (cnt > 0 || mirror_flush(s) < 0) {
1120 bdrv_drained_end(bs);
1121 s->in_drain = false;
1122 continue;
1125 /* The two disks are in sync. Exit and report successful
1126 * completion.
1128 assert(QLIST_EMPTY(&bs->tracked_requests));
1129 need_drain = false;
1130 break;
1133 if (job_is_ready(&s->common.job) && !should_complete) {
1134 if (s->in_flight == 0 && cnt == 0) {
1135 trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
1136 BLOCK_JOB_SLICE_TIME);
1137 job_sleep_ns(&s->common.job, BLOCK_JOB_SLICE_TIME);
1139 } else {
1140 block_job_ratelimit_sleep(&s->common);
1142 s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1145 immediate_exit:
1146 if (s->in_flight > 0) {
1147 /* We get here only if something went wrong. Either the job failed,
1148 * or it was cancelled prematurely so that we do not guarantee that
1149 * the target is a copy of the source.
1151 assert(ret < 0 || job_is_cancelled(&s->common.job));
1152 assert(need_drain);
1153 mirror_wait_for_all_io(s);
1156 assert(s->in_flight == 0);
1157 qemu_vfree(s->buf);
1158 g_free(s->cow_bitmap);
1159 g_free(s->in_flight_bitmap);
1160 bdrv_dirty_iter_free(s->dbi);
1162 if (need_drain) {
1163 s->in_drain = true;
1164 bdrv_drained_begin(bs);
1167 return ret;
1170 static void mirror_complete(Job *job, Error **errp)
1172 MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
1174 if (!job_is_ready(job)) {
1175 error_setg(errp, "The active block job '%s' cannot be completed",
1176 job->id);
1177 return;
1180 /* block all operations on to_replace bs */
1181 if (s->replaces) {
1182 s->to_replace = bdrv_find_node(s->replaces);
1183 if (!s->to_replace) {
1184 error_setg(errp, "Node name '%s' not found", s->replaces);
1185 return;
1188 /* TODO Translate this into child freeze system. */
1189 error_setg(&s->replace_blocker,
1190 "block device is in use by block-job-complete");
1191 bdrv_op_block_all(s->to_replace, s->replace_blocker);
1192 bdrv_ref(s->to_replace);
1195 s->should_complete = true;
1197 /* If the job is paused, it will be re-entered when it is resumed */
1198 WITH_JOB_LOCK_GUARD() {
1199 if (!job->paused) {
1200 job_enter_cond_locked(job, NULL);
1205 static void coroutine_fn mirror_pause(Job *job)
1207 MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
1209 mirror_wait_for_all_io(s);
1212 static bool mirror_drained_poll(BlockJob *job)
1214 MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
1216 /* If the job isn't paused nor cancelled, we can't be sure that it won't
1217 * issue more requests. We make an exception if we've reached this point
1218 * from one of our own drain sections, to avoid a deadlock waiting for
1219 * ourselves.
1221 WITH_JOB_LOCK_GUARD() {
1222 if (!s->common.job.paused && !job_is_cancelled_locked(&job->job)
1223 && !s->in_drain) {
1224 return true;
1228 return !!s->in_flight;
1231 static bool mirror_cancel(Job *job, bool force)
1233 MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
1234 BlockDriverState *target = blk_bs(s->target);
1237 * Before the job is READY, we treat any cancellation like a
1238 * force-cancellation.
1240 force = force || !job_is_ready(job);
1242 if (force) {
1243 bdrv_cancel_in_flight(target);
1245 return force;
1248 static bool commit_active_cancel(Job *job, bool force)
1250 /* Same as above in mirror_cancel() */
1251 return force || !job_is_ready(job);
1254 static void mirror_change(BlockJob *job, BlockJobChangeOptions *opts,
1255 Error **errp)
1257 MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
1258 BlockJobChangeOptionsMirror *change_opts = &opts->u.mirror;
1259 MirrorCopyMode current;
1262 * The implementation relies on the fact that copy_mode is only written
1263 * under the BQL. Otherwise, further synchronization would be required.
1266 GLOBAL_STATE_CODE();
1268 if (qatomic_read(&s->copy_mode) == change_opts->copy_mode) {
1269 return;
1272 if (change_opts->copy_mode != MIRROR_COPY_MODE_WRITE_BLOCKING) {
1273 error_setg(errp, "Change to copy mode '%s' is not implemented",
1274 MirrorCopyMode_str(change_opts->copy_mode));
1275 return;
1278 current = qatomic_cmpxchg(&s->copy_mode, MIRROR_COPY_MODE_BACKGROUND,
1279 change_opts->copy_mode);
1280 if (current != MIRROR_COPY_MODE_BACKGROUND) {
1281 error_setg(errp, "Expected current copy mode '%s', got '%s'",
1282 MirrorCopyMode_str(MIRROR_COPY_MODE_BACKGROUND),
1283 MirrorCopyMode_str(current));
1287 static void mirror_query(BlockJob *job, BlockJobInfo *info)
1289 MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
1291 info->u.mirror = (BlockJobInfoMirror) {
1292 .actively_synced = qatomic_read(&s->actively_synced),
static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = mirror_cancel,
    },
    .drained_poll               = mirror_drained_poll,
    .change                     = mirror_change,
    .query                      = mirror_query,
};

static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
        .cancel                 = commit_active_cancel,
    },
    .drained_poll               = mirror_drained_poll,
};
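/*
 * Mirror a guest write (or zero/discard) synchronously to the target.
 * Unaligned head and tail bytes that are still dirty are skipped; the
 * background copy will pick them up instead.
 */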
1330 static void coroutine_fn
1331 do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
1332 uint64_t offset, uint64_t bytes,
1333 QEMUIOVector *qiov, int flags)
1335 int ret;
1336 size_t qiov_offset = 0;
1337 int64_t bitmap_offset, bitmap_end;
1339 if (!QEMU_IS_ALIGNED(offset, job->granularity) &&
1340 bdrv_dirty_bitmap_get(job->dirty_bitmap, offset))
1343 * Dirty unaligned padding: ignore it.
1345 * Reasoning:
1346 * 1. If we copy it, we can't reset corresponding bit in
1347 * dirty_bitmap as there may be some "dirty" bytes still not
1348 * copied.
1349 * 2. It's already dirty, so skipping it we don't diverge mirror
1350 * progress.
1352 * Note, that because of this, guest write may have no contribution
1353 * into mirror converge, but that's not bad, as we have background
1354 * process of mirroring. If under some bad circumstances (high guest
1355 * IO load) background process starve, we will not converge anyway,
1356 * even if each write will contribute, as guest is not guaranteed to
1357 * rewrite the whole disk.
1359 qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset;
1360 if (bytes <= qiov_offset) {
1361 /* nothing to do after shrink */
1362 return;
1364 offset += qiov_offset;
1365 bytes -= qiov_offset;
1368 if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) &&
1369 bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1))
1371 uint64_t tail = (offset + bytes) % job->granularity;
1373 if (bytes <= tail) {
1374 /* nothing to do after shrink */
1375 return;
1377 bytes -= tail;
1381 * Tails are either clean or shrunk, so for bitmap resetting
1382 * we safely align the range down.
1384 bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity);
1385 bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity);
1386 if (bitmap_offset < bitmap_end) {
1387 bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
1388 bitmap_end - bitmap_offset);
1391 job_progress_increase_remaining(&job->common.job, bytes);
1392 job->active_write_bytes_in_flight += bytes;
1394 switch (method) {
1395 case MIRROR_METHOD_COPY:
1396 ret = blk_co_pwritev_part(job->target, offset, bytes,
1397 qiov, qiov_offset, flags);
1398 break;
1400 case MIRROR_METHOD_ZERO:
1401 assert(!qiov);
1402 ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags);
1403 break;
1405 case MIRROR_METHOD_DISCARD:
1406 assert(!qiov);
1407 ret = blk_co_pdiscard(job->target, offset, bytes);
1408 break;
1410 default:
1411 abort();
1414 job->active_write_bytes_in_flight -= bytes;
1415 if (ret >= 0) {
1416 job_progress_update(&job->common.job, bytes);
1417 } else {
1418 BlockErrorAction action;
1421 * We failed, so we should mark dirty the whole area, aligned up.
1422 * Note that we don't care about shrunk tails if any: they were dirty
1423 * at function start, and they must be still dirty, as we've locked
1424 * the region for in-flight op.
1426 bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity);
1427 bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity);
1428 bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset,
1429 bitmap_end - bitmap_offset);
1430 qatomic_set(&job->actively_synced, false);
1432 action = mirror_error_action(job, false, -ret);
1433 if (action == BLOCK_ERROR_ACTION_REPORT) {
1434 if (!job->ret) {
1435 job->ret = ret;
1441 static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
1442 uint64_t offset,
1443 uint64_t bytes)
1445 MirrorOp *op;
1446 uint64_t start_chunk = offset / s->granularity;
1447 uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
1449 op = g_new(MirrorOp, 1);
1450 *op = (MirrorOp){
1451 .s = s,
1452 .offset = offset,
1453 .bytes = bytes,
1454 .is_active_write = true,
1455 .is_in_flight = true,
1456 .co = qemu_coroutine_self(),
1458 qemu_co_queue_init(&op->waiting_requests);
1459 QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
1461 s->in_active_write_counter++;
1464 * Wait for concurrent requests affecting the area. If there are already
1465 * running requests that are copying off now-to-be stale data in the area,
1466 * we must wait for them to finish before we begin writing fresh data to the
1467 * target so that the write operations appear in the correct order.
1468 * Note that background requests (see mirror_iteration()) in contrast only
1469 * wait for conflicting requests at the start of the dirty area, and then
1470 * (based on the in_flight_bitmap) truncate the area to copy so it will not
1471 * conflict with any requests beyond that. For active writes, however, we
1472 * cannot truncate that area. The request from our parent must be blocked
1473 * until the area is copied in full. Therefore, we must wait for the whole
1474 * area to become free of concurrent requests.
1476 mirror_wait_on_conflicts(op, s, offset, bytes);
1478 bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
1480 return op;
1483 static void coroutine_fn GRAPH_RDLOCK active_write_settle(MirrorOp *op)
1485 uint64_t start_chunk = op->offset / op->s->granularity;
1486 uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
1487 op->s->granularity);
1489 if (!--op->s->in_active_write_counter &&
1490 qatomic_read(&op->s->actively_synced)) {
1491 BdrvChild *source = op->s->mirror_top_bs->backing;
1493 if (QLIST_FIRST(&source->bs->parents) == source &&
1494 QLIST_NEXT(source, next_parent) == NULL)
1496 /* Assert that we are back in sync once all active write
1497 * operations are settled.
1498 * Note that we can only assert this if the mirror node
1499 * is the source node's only parent. */
1500 assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
1503 bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
1504 QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
1505 qemu_co_queue_restart_all(&op->waiting_requests);
1506 g_free(op);
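/* Reads are passed straight through to the source (the filter's backing child) */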
static int coroutine_fn GRAPH_RDLOCK
bdrv_mirror_top_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                       QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}
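/*
 * Guest writes are mirrored synchronously only while the job exists, has not
 * failed or been cancelled, and is running in write-blocking mode.
 */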
static bool should_copy_to_target(MirrorBDSOpaque *s)
{
    return s->job && s->job->ret >= 0 &&
           !job_is_cancelled(&s->job->common.job) &&
           qatomic_read(&s->job->copy_mode) == MIRROR_COPY_MODE_WRITE_BLOCKING;
}
1523 static int coroutine_fn GRAPH_RDLOCK
1524 bdrv_mirror_top_do_write(BlockDriverState *bs, MirrorMethod method,
1525 bool copy_to_target, uint64_t offset, uint64_t bytes,
1526 QEMUIOVector *qiov, int flags)
1528 MirrorOp *op = NULL;
1529 MirrorBDSOpaque *s = bs->opaque;
1530 int ret = 0;
1532 if (copy_to_target) {
1533 op = active_write_prepare(s->job, offset, bytes);
1536 switch (method) {
1537 case MIRROR_METHOD_COPY:
1538 ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
1539 break;
1541 case MIRROR_METHOD_ZERO:
1542 ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
1543 break;
1545 case MIRROR_METHOD_DISCARD:
1546 ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
1547 break;
1549 default:
1550 abort();
1553 if (!copy_to_target && s->job && s->job->dirty_bitmap) {
1554 qatomic_set(&s->job->actively_synced, false);
1555 bdrv_set_dirty_bitmap(s->job->dirty_bitmap, offset, bytes);
1558 if (ret < 0) {
1559 goto out;
1562 if (copy_to_target) {
1563 do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
1566 out:
1567 if (copy_to_target) {
1568 active_write_settle(op);
1570 return ret;
1573 static int coroutine_fn GRAPH_RDLOCK
1574 bdrv_mirror_top_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
1575 QEMUIOVector *qiov, BdrvRequestFlags flags)
1577 QEMUIOVector bounce_qiov;
1578 void *bounce_buf;
1579 int ret = 0;
1580 bool copy_to_target = should_copy_to_target(bs->opaque);
1582 if (copy_to_target) {
1583 /* The guest might concurrently modify the data to write; but
1584 * the data on source and destination must match, so we have
1585 * to use a bounce buffer if we are going to write to the
1586 * target now. */
1587 bounce_buf = qemu_blockalign(bs, bytes);
1588 iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);
1590 qemu_iovec_init(&bounce_qiov, 1);
1591 qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
1592 qiov = &bounce_qiov;
1594 flags &= ~BDRV_REQ_REGISTERED_BUF;
1597 ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, copy_to_target,
1598 offset, bytes, qiov, flags);
1600 if (copy_to_target) {
1601 qemu_iovec_destroy(&bounce_qiov);
1602 qemu_vfree(bounce_buf);
1605 return ret;
1608 static int coroutine_fn GRAPH_RDLOCK bdrv_mirror_top_flush(BlockDriverState *bs)
1610 if (bs->backing == NULL) {
1611 /* we can be here after failed bdrv_append in mirror_start_job */
1612 return 0;
1614 return bdrv_co_flush(bs->backing->bs);
1617 static int coroutine_fn GRAPH_RDLOCK
1618 bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
1619 int64_t bytes, BdrvRequestFlags flags)
1621 bool copy_to_target = should_copy_to_target(bs->opaque);
1622 return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, copy_to_target,
1623 offset, bytes, NULL, flags);
1626 static int coroutine_fn GRAPH_RDLOCK
1627 bdrv_mirror_top_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
1629 bool copy_to_target = should_copy_to_target(bs->opaque);
1630 return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, copy_to_target,
1631 offset, bytes, NULL, 0);
1634 static void GRAPH_RDLOCK bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
1636 if (bs->backing == NULL) {
1637 /* we can be here after failed bdrv_attach_child in
1638 * bdrv_set_backing_hd */
1639 return;
1641 pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
1642 bs->backing->bs->filename);
1645 static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
1646 BdrvChildRole role,
1647 BlockReopenQueue *reopen_queue,
1648 uint64_t perm, uint64_t shared,
1649 uint64_t *nperm, uint64_t *nshared)
1651 MirrorBDSOpaque *s = bs->opaque;
1653 if (s->stop) {
1655 * If the job is to be stopped, we do not need to forward
1656 * anything to the real image.
1658 *nperm = 0;
1659 *nshared = BLK_PERM_ALL;
1660 return;
1663 bdrv_default_perms(bs, c, role, reopen_queue,
1664 perm, shared, nperm, nshared);
1666 if (s->is_commit) {
1668 * For commit jobs, we cannot take CONSISTENT_READ, because
1669 * that permission is unshared for everything above the base
1670 * node (except for filters on the base node).
1671 * We also have to force-share the WRITE permission, or
1672 * otherwise we would block ourselves at the base node (if
1673 * writes are blocked for a node, they are also blocked for
1674 * its backing file).
1675 * (We could also share RESIZE, because it may be needed for
1676 * the target if its size is less than the top node's; but
1677 * bdrv_default_perms_for_cow() automatically shares RESIZE
1678 * for backing nodes if WRITE is shared, so there is no need
1679 * to do it here.)
1681 *nperm &= ~BLK_PERM_CONSISTENT_READ;
1682 *nshared |= BLK_PERM_WRITE;
/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,

    .is_filter                  = true,
    .filtered_child_is_backing  = true,
};
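/*
 * Common setup for drive-mirror and active commit: insert the mirror_top
 * filter above @bs, create the block job, attach @target through a
 * BlockBackend with the appropriate permissions, and create the dirty bitmap
 * on the filter node before the job is started.
 */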
1702 static BlockJob *mirror_start_job(
1703 const char *job_id, BlockDriverState *bs,
1704 int creation_flags, BlockDriverState *target,
1705 const char *replaces, int64_t speed,
1706 uint32_t granularity, int64_t buf_size,
1707 BlockMirrorBackingMode backing_mode,
1708 bool zero_target,
1709 BlockdevOnError on_source_error,
1710 BlockdevOnError on_target_error,
1711 bool unmap,
1712 BlockCompletionFunc *cb,
1713 void *opaque,
1714 const BlockJobDriver *driver,
1715 bool is_none_mode, BlockDriverState *base,
1716 bool auto_complete, const char *filter_node_name,
1717 bool is_mirror, MirrorCopyMode copy_mode,
1718 Error **errp)
1720 MirrorBlockJob *s;
1721 MirrorBDSOpaque *bs_opaque;
1722 BlockDriverState *mirror_top_bs;
1723 bool target_is_backing;
1724 uint64_t target_perms, target_shared_perms;
1725 int ret;
1727 GLOBAL_STATE_CODE();
1729 if (granularity == 0) {
1730 granularity = bdrv_get_default_bitmap_granularity(target);
1733 assert(is_power_of_2(granularity));
1735 if (buf_size < 0) {
1736 error_setg(errp, "Invalid parameter 'buf-size'");
1737 return NULL;
1740 if (buf_size == 0) {
1741 buf_size = DEFAULT_MIRROR_BUF_SIZE;
1744 bdrv_graph_rdlock_main_loop();
1745 if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) {
1746 error_setg(errp, "Can't mirror node into itself");
1747 bdrv_graph_rdunlock_main_loop();
1748 return NULL;
1751 target_is_backing = bdrv_chain_contains(bs, target);
1752 bdrv_graph_rdunlock_main_loop();
1754 /* In the case of active commit, add dummy driver to provide consistent
1755 * reads on the top, while disabling it in the intermediate nodes, and make
1756 * the backing chain writable. */
1757 mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
1758 BDRV_O_RDWR, errp);
1759 if (mirror_top_bs == NULL) {
1760 return NULL;
1762 if (!filter_node_name) {
1763 mirror_top_bs->implicit = true;
1766 /* So that we can always drop this node */
1767 mirror_top_bs->never_freeze = true;
1769 mirror_top_bs->total_sectors = bs->total_sectors;
1770 mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
1771 mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
1772 BDRV_REQ_NO_FALLBACK;
1773 bs_opaque = g_new0(MirrorBDSOpaque, 1);
1774 mirror_top_bs->opaque = bs_opaque;
1776 bs_opaque->is_commit = target_is_backing;
1778 bdrv_drained_begin(bs);
1779 ret = bdrv_append(mirror_top_bs, bs, errp);
1780 bdrv_drained_end(bs);
1782 if (ret < 0) {
1783 bdrv_unref(mirror_top_bs);
1784 return NULL;
1787 /* Make sure that the source is not resized while the job is running */
1788 s = block_job_create(job_id, driver, NULL, mirror_top_bs,
1789 BLK_PERM_CONSISTENT_READ,
1790 BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
1791 BLK_PERM_WRITE, speed,
1792 creation_flags, cb, opaque, errp);
1793 if (!s) {
1794 goto fail;
1797 /* The block job now has a reference to this node */
1798 bdrv_unref(mirror_top_bs);
1800 s->mirror_top_bs = mirror_top_bs;
1802 /* No resize for the target either; while the mirror is still running, a
1803 * consistent read isn't necessarily possible. We could possibly allow
1804 * writes and graph modifications, though it would likely defeat the
1805 * purpose of a mirror, so leave them blocked for now.
1807 * In the case of active commit, things look a bit different, though,
1808 * because the target is an already populated backing file in active use.
1809 * We can allow anything except resize there.*/
1811 target_perms = BLK_PERM_WRITE;
1812 target_shared_perms = BLK_PERM_WRITE_UNCHANGED;
1814 if (target_is_backing) {
1815 int64_t bs_size, target_size;
1816 bs_size = bdrv_getlength(bs);
1817 if (bs_size < 0) {
1818 error_setg_errno(errp, -bs_size,
1819 "Could not inquire top image size");
1820 goto fail;
1823 target_size = bdrv_getlength(target);
1824 if (target_size < 0) {
1825 error_setg_errno(errp, -target_size,
1826 "Could not inquire base image size");
1827 goto fail;
1830 if (target_size < bs_size) {
1831 target_perms |= BLK_PERM_RESIZE;
1834 target_shared_perms |= BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE;
1835 } else {
1836 bdrv_graph_rdlock_main_loop();
1837 if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
1839 * We may want to allow this in the future, but it would
1840 * require taking some extra care.
1842 error_setg(errp, "Cannot mirror to a filter on top of a node in "
1843 "the source's backing chain");
1844 bdrv_graph_rdunlock_main_loop();
1845 goto fail;
1847 bdrv_graph_rdunlock_main_loop();
1850 s->target = blk_new(s->common.job.aio_context,
1851 target_perms, target_shared_perms);
1852 ret = blk_insert_bs(s->target, target, errp);
1853 if (ret < 0) {
1854 goto fail;
1856 if (is_mirror) {
1857 /* XXX: Mirror target could be a NBD server of target QEMU in the case
1858 * of non-shared block migration. To allow migration completion, we
1859 * have to allow "inactivate" of the target BB. When that happens, we
1860 * know the job is drained, and the vcpus are stopped, so no write
1861 * operation will be performed. Block layer already has assertions to
1862 * ensure that. */
1863 blk_set_force_allow_inactivate(s->target);
1865 blk_set_allow_aio_context_change(s->target, true);
1866 blk_set_disable_request_queuing(s->target, true);
1868 bdrv_graph_rdlock_main_loop();
1869 s->replaces = g_strdup(replaces);
1870 s->on_source_error = on_source_error;
1871 s->on_target_error = on_target_error;
1872 s->is_none_mode = is_none_mode;
1873 s->backing_mode = backing_mode;
1874 s->zero_target = zero_target;
1875 qatomic_set(&s->copy_mode, copy_mode);
1876 s->base = base;
1877 s->base_overlay = bdrv_find_overlay(bs, base);
1878 s->granularity = granularity;
1879 s->buf_size = ROUND_UP(buf_size, granularity);
1880 s->unmap = unmap;
1881 if (auto_complete) {
1882 s->should_complete = true;
1884 bdrv_graph_rdunlock_main_loop();
1886 s->dirty_bitmap = bdrv_create_dirty_bitmap(s->mirror_top_bs, granularity,
1887 NULL, errp);
1888 if (!s->dirty_bitmap) {
1889 goto fail;
1893 * The dirty bitmap is set by bdrv_mirror_top_do_write() when not in active
1894 * mode.
1896 bdrv_disable_dirty_bitmap(s->dirty_bitmap);
1898 bdrv_graph_wrlock();
1899 ret = block_job_add_bdrv(&s->common, "source", bs, 0,
1900 BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
1901 BLK_PERM_CONSISTENT_READ,
1902 errp);
1903 if (ret < 0) {
1904 bdrv_graph_wrunlock();
1905 goto fail;
1908 /* Required permissions are already taken with blk_new() */
1909 block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
1910 &error_abort);
1912 /* In commit_active_start() all intermediate nodes disappear, so
1913 * any jobs in them must be blocked */
1914 if (target_is_backing) {
1915 BlockDriverState *iter, *filtered_target;
1916 uint64_t iter_shared_perms;
1919 * The topmost node with
1920 * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target)
1922 filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target));
1924 assert(bdrv_skip_filters(filtered_target) ==
1925 bdrv_skip_filters(target));
1928 * XXX BLK_PERM_WRITE needs to be allowed so we don't block
1929 * ourselves at s->base (if writes are blocked for a node, they are
1930 * also blocked for its backing file). The other options would be a
1931 * second filter driver above s->base (== target).
1933 iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;
1935 for (iter = bdrv_filter_or_cow_bs(bs); iter != target;
1936 iter = bdrv_filter_or_cow_bs(iter))
1938 if (iter == filtered_target) {
1940 * From here on, all nodes are filters on the base.
1941 * This allows us to share BLK_PERM_CONSISTENT_READ.
1943 iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
1946 ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
1947 iter_shared_perms, errp);
1948 if (ret < 0) {
1949 bdrv_graph_wrunlock();
1950 goto fail;
1954 if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
1955 bdrv_graph_wrunlock();
1956 goto fail;
1959 bdrv_graph_wrunlock();
1961 QTAILQ_INIT(&s->ops_in_flight);
1963 trace_mirror_start(bs, s, opaque);
1964 job_start(&s->common.job);
1966 return &s->common;
1968 fail:
1969 if (s) {
1970 /* Make sure this BDS does not go away until we have completed the graph
1971 * changes below */
1972 bdrv_ref(mirror_top_bs);
1974 g_free(s->replaces);
1975 blk_unref(s->target);
1976 bs_opaque->job = NULL;
1977 if (s->dirty_bitmap) {
1978 bdrv_release_dirty_bitmap(s->dirty_bitmap);
1980 job_early_fail(&s->common.job);
1983 bs_opaque->stop = true;
1984 bdrv_drained_begin(bs);
1985 bdrv_graph_wrlock();
1986 assert(mirror_top_bs->backing->bs == bs);
1987 bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
1988 &error_abort);
1989 bdrv_replace_node(mirror_top_bs, bs, &error_abort);
1990 bdrv_graph_wrunlock();
1991 bdrv_drained_end(bs);
1993 bdrv_unref(mirror_top_bs);
1995 return NULL;
1998 void mirror_start(const char *job_id, BlockDriverState *bs,
1999 BlockDriverState *target, const char *replaces,
2000 int creation_flags, int64_t speed,
2001 uint32_t granularity, int64_t buf_size,
2002 MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
2003 bool zero_target,
2004 BlockdevOnError on_source_error,
2005 BlockdevOnError on_target_error,
2006 bool unmap, const char *filter_node_name,
2007 MirrorCopyMode copy_mode, Error **errp)
2009 bool is_none_mode;
2010 BlockDriverState *base;
2012 GLOBAL_STATE_CODE();
2014 if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
2015 (mode == MIRROR_SYNC_MODE_BITMAP)) {
2016 error_setg(errp, "Sync mode '%s' not supported",
2017 MirrorSyncMode_str(mode));
2018 return;
2021 bdrv_graph_rdlock_main_loop();
2022 is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
2023 base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
2024 bdrv_graph_rdunlock_main_loop();
2026 mirror_start_job(job_id, bs, creation_flags, target, replaces,
2027 speed, granularity, buf_size, backing_mode, zero_target,
2028 on_source_error, on_target_error, unmap, NULL, NULL,
2029 &mirror_job_driver, is_none_mode, base, false,
2030 filter_node_name, true, copy_mode, errp);
2033 BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
2034 BlockDriverState *base, int creation_flags,
2035 int64_t speed, BlockdevOnError on_error,
2036 const char *filter_node_name,
2037 BlockCompletionFunc *cb, void *opaque,
2038 bool auto_complete, Error **errp)
2040 bool base_read_only;
2041 BlockJob *job;
2043 GLOBAL_STATE_CODE();
2045 base_read_only = bdrv_is_read_only(base);
2047 if (base_read_only) {
2048 if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
2049 return NULL;
2053 job = mirror_start_job(
2054 job_id, bs, creation_flags, base, NULL, speed, 0, 0,
2055 MIRROR_LEAVE_BACKING_CHAIN, false,
2056 on_error, on_error, true, cb, opaque,
2057 &commit_active_job_driver, false, base, auto_complete,
2058 filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
2059 errp);
2060 if (!job) {
2061 goto error_restore_flags;
2064 return job;
2066 error_restore_flags:
2067 /* ignore error and errp for bdrv_reopen, because we want to propagate
2068 * the original error */
2069 if (base_read_only) {
2070 bdrv_reopen_set_read_only(base, true, NULL);
2072 return NULL;