dirty-bitmap: Change bdrv_get_dirty_locked() to take bytes
[qemu/kevin.git] / block / mirror.c
/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
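/* With the defaults above, the mirroring buffer is MAX_IN_FLIGHT *
 * MAX_IO_BYTES = 16 * 1 MiB = 16 MiB, i.e. enough backing storage for
 * sixteen maximum-sized requests to be in flight at once. */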
/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
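/* Note that MirrorBuffer is overlaid on the chunk memory itself: while a
 * chunk sits on s->buf_free, its first bytes hold the list link; once the
 * chunk is added to an op's QEMUIOVector, the same memory is reused as the
 * I/O buffer (see mirror_free_init() and mirror_do_read() below). */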
typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *source;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
} MirrorBlockJob;
typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;
} MirrorOp;
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}
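/* Completion bookkeeping shared by reads, writes, zeroes and discards:
 * return the op's chunks to the free list and clear its window in the
 * in-flight bitmap. For example, with a 64 KiB granularity, an op covering
 * 96 KiB at offset 320 KiB clears chunks 5 and 6
 * (DIV_ROUND_UP(96 KiB, 64 KiB) == 2). */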
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            s->common.offset += op->bytes;
        }
    }
    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co);
    }
}
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset >> BDRV_SECTOR_BITS,
                              op->bytes >> BDRV_SECTOR_BITS);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
    aio_context_release(blk_get_aio_context(s->common.blk));
}
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;

    aio_context_acquire(blk_get_aio_context(s->common.blk));
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset >> BDRV_SECTOR_BITS,
                              op->bytes >> BDRV_SECTOR_BITS);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
    } else {
        blk_aio_pwritev(s->target, op->offset, &op->qiov,
                        0, mirror_write_complete, op);
    }
    aio_context_release(blk_get_aio_context(s->common.blk));
}
/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}
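/* E.g. with s->bdev_length == 10 MiB, a 1 MiB request at offset 9.75 MiB is
 * clipped to the remaining 256 KiB. */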
/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    unsigned int align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    assert(*bytes < INT_MAX);
    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}
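/* Worked example for mirror_cow_align(), assuming a 64 KiB granularity, a
 * 128 KiB target cluster size, a large max_iov and a request far from
 * end-of-file: a 64 KiB request at offset 128 KiB whose chunks are not yet
 * set in cow_bitmap is widened by bdrv_round_to_clusters() to the full
 * 128 KiB cluster at offset 128 KiB. The tail moved from 192 KiB to
 * 256 KiB, so the function returns 64 KiB. */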
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}
/* Submit async read while handling COW.
 * Returns: The number of bytes copied after and including offset,
 *          excluding any bytes copied prior to offset due to alignment.
 *          This will be @bytes if no alignment is necessary, or
 *          (new_end - offset) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static uint64_t mirror_do_read(MirrorBlockJob *s, int64_t offset,
                               uint64_t bytes)
{
    BlockBackend *source = s->common.blk;
    int nb_chunks;
    uint64_t ret;
    MirrorOp *op;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    bytes = MIN(s->buf_size, MIN(max_bytes, bytes));
    assert(bytes);
    assert(bytes < BDRV_REQUEST_MAX_BYTES);
    ret = bytes;

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &offset, &bytes);
    }
    assert(bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback.  */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += bytes;
    trace_mirror_one_iteration(s, offset, bytes);

    blk_aio_preadv(source, offset, &op->qiov, 0, mirror_read_complete, op);
    return ret;
}
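/* In mirror_do_read() above, the qiov is built from whole chunks with the
 * last entry trimmed to the request: e.g. a 96 KiB read with a 64 KiB
 * granularity uses nb_chunks == 2 and yields iovecs of 64 KiB and 32 KiB. */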
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t offset,
                                      uint64_t bytes,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->offset = offset;
    op->bytes = bytes;

    s->in_flight++;
    s->bytes_in_flight += bytes;
    if (is_discard) {
        blk_aio_pdiscard(s->target, offset,
                         op->bytes, mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, offset,
                              op->bytes, s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->source;
    int64_t offset, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    first_chunk = offset / s->granularity;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, offset, s->in_flight);
        mirror_wait_for_io(s);
    }

    block_job_pause_point(&s->common);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset >> BDRV_SECTOR_BITS,
                                   nb_chunks * sectors_per_chunk);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int64_t ret;
        int io_sectors;
        unsigned int io_bytes;
        int64_t io_bytes_acct;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_get_block_status_above(source, NULL,
                                          offset >> BDRV_SECTOR_BITS,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        io_bytes = io_sectors * BDRV_SECTOR_SIZE;
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            unsigned int target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_io(s);
        }

        if (s->ret < 0) {
            return 0;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_bytes = io_bytes_acct = mirror_do_read(s, offset, io_bytes);
            break;
        case MIRROR_METHOD_ZERO:
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, offset, io_bytes,
                                      mirror_method == MIRROR_METHOD_DISCARD);
            if (write_zeroes_ok) {
                io_bytes_acct = 0;
            } else {
                io_bytes_acct = io_bytes;
            }
            break;
        default:
            abort();
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        if (s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, io_bytes_acct);
        }
    }
    return delay_ns;
}
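/* Carve s->buf into granularity-sized chunks and thread them onto
 * s->buf_free. mirror_start_job() rounds buf_size up to a multiple of the
 * granularity, so the loop below terminates with buf_size exactly zero. */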
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}
typedef struct {
    int ret;
} MirrorExitData;

static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    BlockDriverState *mirror_top_bs = s->mirror_top_bs;
    Error *local_err = NULL;

    bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away before we called
     * block_job_completed(). */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. */
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    if (s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                data->ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /* Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid. Also give up permissions on mirror_top_bs->backing, which might
     * block the removal. */
    block_job_remove_all_bdrv(job);
    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(job->blk);
    blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(job->blk, mirror_top_bs, &error_abort);

    block_job_completed(&s->common, data->ret);

    g_free(data);
    bdrv_drained_end(src);
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);
}
static void mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > SLICE_TIME) {
        s->last_pause_ns = now;
        block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
    } else {
        block_job_pause_point(&s->common);
    }
}
static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t sector_num, end;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret, n;
    int64_t count;

    end = s->bdev_length / BDRV_SECTOR_SIZE;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, end);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (sector_num = 0; sector_num < end; ) {
            int nb_sectors = MIN(end - sector_num,
                QEMU_ALIGN_DOWN(INT_MAX, s->granularity) >> BDRV_SECTOR_BITS);

            mirror_throttle(s);

            if (block_job_is_cancelled(&s->common)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_io(s);
                continue;
            }

            mirror_do_zero_or_discard(s, sector_num * BDRV_SECTOR_SIZE,
                                      nb_sectors * BDRV_SECTOR_SIZE, false);
            sector_num += nb_sectors;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap.  */
    for (sector_num = 0; sector_num < end; ) {
        /* Just to make sure we are not exceeding int limit. */
        int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                             end - sector_num);

        mirror_throttle(s);

        if (block_job_is_cancelled(&s->common)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, sector_num * BDRV_SECTOR_SIZE,
                                      nb_sectors * BDRV_SECTOR_SIZE, &count);
        if (ret < 0) {
            return ret;
        }

        /* TODO: Relax this once bdrv_is_allocated_above and dirty
         * bitmaps no longer require sector alignment. */
        assert(QEMU_IS_ALIGNED(count, BDRV_SECTOR_SIZE));
        n = count >> BDRV_SECTOR_BITS;
        assert(n > 0);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
        }
        sector_num += n;
    }
    return 0;
}
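/* In the loop above, bdrv_is_allocated_above() returning 1 means the range
 * is allocated somewhere in the chain between bs and base, so it must be
 * copied; ranges unallocated all the way down to base can be skipped
 * because both sides then read the same backing data. */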
/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->source;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF,
                               NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for completion. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW. Instead, we copy sectors around the
     * dirty data if needed. We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || block_job_is_cancelled(&s->common)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        block_job_pause_point(&s->common);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty bytes remaining and
         * s->bytes_in_flight is the number of bytes currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset + s->bytes_in_flight + cnt;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase. From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion. This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                block_job_event_ready(&s->common);
                s->synced = true;
            }

            should_complete = s->should_complete ||
                block_job_is_cancelled(&s->common);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now. Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                continue;
            }

            /* The two disks are in sync. Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong. Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    data = g_malloc(sizeof(*data));
    data->ret = ret;

    if (need_drain) {
        bdrv_drained_begin(bs);
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}
static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
}
static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}
static void mirror_pause(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    mirror_wait_for_all_io(s);
}

static void mirror_attached_aio_context(BlockJob *job, AioContext *new_context)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    blk_set_aio_context(s->target, new_context);
}

static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}
static const BlockJobDriver mirror_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_MIRROR,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size          = sizeof(MirrorBlockJob),
    .job_type               = BLOCK_JOB_TYPE_COMMIT,
    .set_speed              = mirror_set_speed,
    .start                  = mirror_run,
    .complete               = mirror_complete,
    .pause                  = mirror_pause,
    .attached_aio_context   = mirror_attached_aio_context,
    .drain                  = mirror_drain,
};
static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_co_pdiscard(bs->backing->bs, offset, bytes);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    bdrv_refresh_filename(bs->backing->bs);
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_close(BlockDriverState *bs)
{
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}
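/* E.g. a parent requesting BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE on the
 * filter takes only BLK_PERM_WRITE on the backing child above, while sharing
 * everything, so the filter itself never blocks other users of the chain. */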
/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_get_block_status   = bdrv_co_get_block_status_from_backing,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_close                 = bdrv_mirror_top_close,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
};
static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                             int creation_flags, BlockDriverState *target,
                             const char *replaces, int64_t speed,
                             uint32_t granularity, int64_t buf_size,
                             BlockMirrorBackingMode backing_mode,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base,
                             bool auto_complete, const char *filter_node_name,
                             bool is_mirror,
                             Error **errp)
{
    MirrorBlockJob *s;
    BlockDriverState *mirror_top_bs;
    bool target_graph_mod;
    bool target_is_backing;
    Error *local_err = NULL;
    int ret;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);
    /* Granularity must be large enough for sector-based dirty bitmap */
    assert(granularity >= BDRV_SECTOR_SIZE);

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    /* In the case of active commit, add dummy driver to provide consistent
     * reads on the top, while disabling it in the intermediate nodes, and make
     * the backing chain writable. */
    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
                                         BDRV_O_RDWR, errp);
    if (mirror_top_bs == NULL) {
        return;
    }
    if (!filter_node_name) {
        mirror_top_bs->implicit = true;
    }
    mirror_top_bs->total_sectors = bs->total_sectors;
    bdrv_set_aio_context(mirror_top_bs, bdrv_get_aio_context(bs));

    /* bdrv_append takes ownership of the mirror_top_bs reference, need to keep
     * it alive until block_job_create() succeeds even if bs has no parent. */
    bdrv_ref(mirror_top_bs);
    bdrv_drained_begin(bs);
    bdrv_append(mirror_top_bs, bs, &local_err);
    bdrv_drained_end(bs);

    if (local_err) {
        bdrv_unref(mirror_top_bs);
        error_propagate(errp, local_err);
        return;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->source = bs;
    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */
    target_is_backing = bdrv_chain_contains(bs, target);
    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
    s->target = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE |
                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
                        BLK_PERM_WRITE_UNCHANGED |
                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
                                             BLK_PERM_WRITE |
                                             BLK_PERM_GRAPH_MOD : 0));
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: Mirror target could be a NBD server of target QEMU in the case
         * of non-shared block migration. To allow migration completion, we
         * have to allow "inactivate" of the target BB. When that happens, we
         * know the job is drained, and the vcpus are stopped, so no write
         * operation will be performed. Block layer already has assertions to
         * ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter;
        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
             * ourselves at s->base (if writes are blocked for a node, they are
             * also blocked for its backing file). The other option would be a
             * second filter driver above s->base (== target). */
            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                     errp);
            if (ret < 0) {
                goto fail;
            }
        }
    }

    trace_mirror_start(bs, s, opaque);
    block_job_start(&s->common);
    return;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        block_job_early_fail(&s->common);
    }

    bdrv_child_try_set_perm(mirror_top_bs->backing, 0, BLK_PERM_ALL,
                            &error_abort);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    bdrv_unref(mirror_top_bs);
}
void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(job_id, bs, BLOCK_JOB_DEFAULT, target, replaces,
                     speed, granularity, buf_size, backing_mode,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, errp);
}
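/* Usage sketch (hypothetical caller, for illustration only): a QMP
 * drive-mirror in sync mode 'full' would reach mirror_start() roughly as
 *
 *     mirror_start("job0", bs, target_bs, NULL,    // no replacement node
 *                  0, 0, 0,                        // default speed,
 *                                                  // granularity, buf size
 *                  MIRROR_SYNC_MODE_FULL, MIRROR_OPEN_BACKING_CHAIN,
 *                  BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
 *                  true, NULL, &local_err);
 *
 * which maps to is_none_mode == false and base == NULL above. */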
void commit_active_start(const char *job_id, BlockDriverState *bs,
                         BlockDriverState *base, int creation_flags,
                         int64_t speed, BlockdevOnError on_error,
                         const char *filter_node_name,
                         BlockCompletionFunc *cb, void *opaque,
                         bool auto_complete, Error **errp)
{
    int orig_base_flags;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    mirror_start_job(job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}