qemu.git: block/mirror.c
/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define DEFAULT_MIRROR_BUF_SIZE (10 << 20)
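
/* The buffer is carved into granularity-sized chunks (see mirror_free_init).
 * For example, with the common 64 KiB default granularity the 10 MiB default
 * buffer holds 10 MiB / 64 KiB = 160 chunks, bounding how much dirty data
 * can be gathered into in-flight operations at any one time.
 */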
/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *target;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t sector_num;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    unsigned long *in_flight_bitmap;
    int in_flight;
    int sectors_in_flight;
    int ret;
    bool unmap;
} MirrorBlockJob;
typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;
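
/* Each MirrorOp describes one in-flight copy.  Its qiov entries point into
 * granularity-sized chunks of s->buf, which mirror_iteration_done() recycles
 * onto s->buf_free when the operation completes.
 */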
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->common.bs,
                                      s->on_source_error, true, error);
    } else {
        return block_job_error_action(&s->common, s->target,
                                      s->on_target_error, false, error);
    }
}
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *)iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = op->nb_sectors / sectors_per_chunk;
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
    }

    qemu_iovec_destroy(&op->qiov);
    g_slice_free(MirrorOp, op);

    /* Enter coroutine when it is not sleeping.  The coroutine sleeps to
     * rate-limit itself.  The coroutine will eventually resume since there is
     * a sleep timeout so don't wake it early.
     */
    if (s->common.busy) {
        qemu_coroutine_enter(s->common.co, NULL);
    }
}
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
                    mirror_write_complete, op);
}
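
/* Pick the next run of dirty sectors, gather them into a single operation,
 * and submit the copy.  On error the sectors are re-marked dirty (see the
 * completion callbacks above) so they are retried on a later pass.  Returns
 * the delay, in nanoseconds, that the rate limiter requests before the next
 * iteration.
 */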
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    uint64_t delay_ns = 0;
    MirrorOp *op;
    int pnum;
    int64_t ret;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(s->sector_num >= 0);
    }

    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->bdev_length / BDRV_SECTOR_SIZE;
    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large.  Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely.  Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done. */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        qemu_coroutine_yield();
    }

    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster.  Wait if that is the case.
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            qemu_coroutine_yield();
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }

        /* We have enough free space to copy these sectors.  */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
        if (!s->synced && s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
        }
    } while (delay_ns == 0 && next_sector < end);
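
    /* The loop above stops either when the rate limiter requests a pause
     * (delay_ns != 0), at the end of the device, at the first clean or
     * already-in-flight chunk, or when free buffer chunks run out.
     */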
    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_slice_new(MirrorOp);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = (nb_sectors * BDRV_SECTOR_SIZE) - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector
            && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }
    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num, nb_sectors);

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                      nb_sectors, &pnum);
    if (ret < 0 || pnum < nb_sectors ||
            (ret & BDRV_BLOCK_DATA && !(ret & BDRV_BLOCK_ZERO))) {
        bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                       mirror_read_complete, op);
    } else if (ret & BDRV_BLOCK_ZERO) {
        bdrv_aio_write_zeroes(s->target, sector_num, op->nb_sectors,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    } else {
        assert(!(ret & BDRV_BLOCK_DATA));
        bdrv_aio_discard(s->target, sector_num, op->nb_sectors,
                         mirror_write_complete, op);
    }
    return delay_ns;
}
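
/* Carve s->buf into granularity-sized chunks and thread them all onto the
 * buf_free list; mirror_iteration() takes chunks from here to build each
 * operation's I/O vector.
 */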
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
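
/* Yield until every in-flight operation has completed.  The AIO completion
 * callbacks re-enter this coroutine via mirror_iteration_done(), so progress
 * continues while we wait.
 */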
static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        qemu_coroutine_yield();
    }
}

typedef struct {
    int ret;
} MirrorExitData;
static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = s->common.bs;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }
        if (bdrv_get_flags(s->target) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(s->target, bdrv_get_flags(to_replace), NULL);
        }
        bdrv_swap(s->target, to_replace);
        if (s->common.driver->job_type == BLOCK_JOB_TYPE_COMMIT) {
            /* drop the bs loop chain formed by the swap: break the loop then
             * trigger the unref from the top one */
            BlockDriverState *p = s->base->backing_hd;
            bdrv_set_backing_hd(s->base, NULL);
            bdrv_unref(p);
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(s->target);
    block_job_completed(&s->common, data->ret);
    g_free(data);
}
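
/* The body of the mirror job coroutine: populate the dirty bitmap (unless
 * sync=none), then keep copying dirty chunks until the job is cancelled or
 * completion is requested.
 */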
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, sectors_per_chunk, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;
    int n;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    } else if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }
    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (backing_filename[0] && !s->target->backing_hd) {
        ret = bdrv_get_info(s->target, &bdi);
        if (ret < 0) {
            goto immediate_exit;
        }
        if (s->granularity < bdi.cluster_size) {
            s->buf_size = MAX(s->buf_size, bdi.cluster_size);
            s->cow_bitmap = bitmap_new(length);
        }
    }
    end = s->bdev_length / BDRV_SECTOR_SIZE;
    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    mirror_free_init(s);
    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap.  */
        BlockDriverState *base = s->base;
        for (sector_num = 0; sector_num < end; ) {
            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
            int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

            if (now - last_pause_ns > SLICE_TIME) {
                last_pause_ns = now;
                block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
            }

            if (block_job_is_cancelled(&s->common)) {
                goto immediate_exit;
            }

            ret = bdrv_is_allocated_above(bs, base,
                                          sector_num, next - sector_num, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1) {
                bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
                sector_num = next;
            } else {
                sector_num += n;
            }
        }
    }
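
    /* Second part: keep copying dirty chunks until the source and target
     * converge.  Guest writes keep marking the bitmap dirty, so the loop
     * runs until nothing is left to copy and completion has been requested.
     */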
    bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                qemu_coroutine_yield();
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }
        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = bdrv_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                if (!s->synced) {
                    block_job_event_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            }
        }
        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }
immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
    bdrv_iostatus_disable(s->target);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}
static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}
static void mirror_iostatus_reset(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    bdrv_iostatus_reset(s->target);
}
static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    Error *local_err = NULL;
    int ret;

    ret = bdrv_open_backing_file(s->target, NULL, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return;
    }
    if (!s->synced) {
        error_setg(errp, QERR_BLOCK_JOB_NOT_READY,
                   bdrv_get_device_name(job->bs));
        return;
    }

    /* check the target bs is not blocked and block all operations on it */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = check_to_replace_node(s->replaces, &local_err);
        if (!s->to_replace) {
            error_propagate(errp, local_err);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}
static const BlockJobDriver mirror_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_MIRROR,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_COMMIT,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};
static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             const char *replaces,
                             int64_t speed, uint32_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->target = target;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        g_free(s->replaces);
        block_job_release(bs);
        return;
    }
    bdrv_set_enable_write_cache(s->target, true);
    bdrv_set_on_error(s->target, on_target_error, on_target_error);
    bdrv_iostatus_enable(s->target);
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}
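
/* Entry point for the QMP drive-mirror command.  An illustrative invocation
 * (the exact argument set depends on the QEMU version):
 *   { "execute": "drive-mirror",
 *     "arguments": { "device": "virtio0",
 *                    "target": "/tmp/mirror.qcow2", "sync": "full" } }
 */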
void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
    mirror_start_job(bs, target, replaces,
                     speed, granularity, buf_size,
                     on_source_error, on_target_error, unmap, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}
void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    bdrv_ref(base);
    mirror_start_job(bs, base, NULL, speed, 0, 0,
                     on_error, on_error, false, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}