block/mirror.c
/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 */
#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"
#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
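
/* The copy loop below yields at least once every SLICE_TIME (100 ms) and
 * never keeps more than MAX_IN_FLIGHT AIO operations pending; SLICE_TIME
 * is also the accounting slice handed to the rate limiter in
 * mirror_set_speed().
 */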
/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
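
/* Free chunks store their list linkage in their own first bytes: each
 * granularity-sized slice of s->buf is cast to a MirrorBuffer while it sits
 * on the free list (see mirror_free_init()), so the pool needs no separate
 * node allocations.  The linkage is only valid while the chunk is free;
 * once handed to a QEMUIOVector the bytes are plain I/O buffer again.
 */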
typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *target;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t sector_num;
    int64_t granularity;
    size_t buf_size;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    unsigned long *in_flight_bitmap;
    int in_flight;
    int ret;
} MirrorBlockJob;
typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;
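
/* A MirrorOp is allocated per copy operation in mirror_iteration(), rides
 * through the read and write AIO callbacks as their opaque pointer, and is
 * freed in mirror_iteration_done() together with its buffer chunks.
 */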
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->common.bs,
                                      s->on_source_error, true, error);
    } else {
        return block_job_error_action(&s->common, s->target,
                                      s->on_target_error, false, error);
    }
}
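
/* Chunk geometry, as an illustration: with the default 64 KiB granularity
 * and 512-byte sectors, sectors_per_chunk = 65536 >> 9 = 128, so an
 * operation spanning sectors [256, 512) covers exactly chunks 2 and 3 of
 * the in-flight and COW bitmaps.
 */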
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = op->nb_sectors / sectors_per_chunk;
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (s->cow_bitmap && ret >= 0) {
        bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
    }

    qemu_iovec_destroy(&op->qiov);
    g_slice_free(MirrorOp, op);

    /* Enter coroutine when it is not sleeping.  The coroutine sleeps to
     * rate-limit itself.  The coroutine will eventually resume since there
     * is a sleep timeout so don't wake it early.
     */
    if (s->common.busy) {
        qemu_coroutine_enter(s->common.co, NULL);
    }
}
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockDriverState *source = s->common.bs;
        BlockErrorAction action;

        bdrv_set_dirty(source, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockDriverState *source = s->common.bs;
        BlockErrorAction action;

        bdrv_set_dirty(source, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    bdrv_aio_writev(s->target, op->sector_num, &op->qiov, op->nb_sectors,
                    mirror_write_complete, op);
}
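
/* Each copy operation is a two-stage pipeline: mirror_iteration() submits
 * bdrv_aio_readv() into the chunks gathered in op->qiov; on success,
 * mirror_read_complete() submits the matching bdrv_aio_writev() to the
 * target.  Either callback re-dirties the sectors on error so that a later
 * iteration retries them.
 */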
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->common.bs;
    int nb_sectors, sectors_per_chunk, nb_chunks;
    int64_t end, sector_num, next_chunk, next_sector, hbitmap_next_sector;
    uint64_t delay_ns = 0;
    MirrorOp *op;

    s->sector_num = hbitmap_iter_next(&s->hbi);
    if (s->sector_num < 0) {
        bdrv_dirty_iter_init(source, s->dirty_bitmap, &s->hbi);
        s->sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s,
                                  bdrv_get_dirty_count(source, s->dirty_bitmap));
        assert(s->sector_num >= 0);
    }
    hbitmap_next_sector = s->sector_num;
    sector_num = s->sector_num;
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    end = s->common.len >> BDRV_SECTOR_BITS;

    /* Extend the QEMUIOVector to include all adjacent blocks that will
     * be copied in this operation.
     *
     * We have to do this if we have no backing file yet in the destination,
     * and the cluster size is very large.  Then we need to do COW ourselves.
     * The first time a cluster is copied, copy it entirely.  Note that,
     * because both the granularity and the cluster size are powers of two,
     * the number of sectors to copy cannot exceed one cluster.
     *
     * We also want to extend the QEMUIOVector to include more adjacent
     * dirty blocks if possible, to limit the number of I/O operations and
     * run efficiently even with a small granularity.
     */
    nb_chunks = 0;
    nb_sectors = 0;
    next_sector = sector_num;
    next_chunk = sector_num / sectors_per_chunk;

    /* Wait for I/O to this cluster (from a previous iteration) to be done. */
    while (test_bit(next_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        qemu_coroutine_yield();
    }
    do {
        int added_sectors, added_chunks;

        if (!bdrv_get_dirty(source, s->dirty_bitmap, next_sector) ||
            test_bit(next_chunk, s->in_flight_bitmap)) {
            assert(nb_sectors > 0);
            break;
        }

        added_sectors = sectors_per_chunk;
        if (s->cow_bitmap && !test_bit(next_chunk, s->cow_bitmap)) {
            bdrv_round_to_clusters(s->target,
                                   next_sector, added_sectors,
                                   &next_sector, &added_sectors);

            /* On the first iteration, the rounding may make us copy
             * sectors before the first dirty one.
             */
            if (next_sector < sector_num) {
                assert(nb_sectors == 0);
                sector_num = next_sector;
                next_chunk = next_sector / sectors_per_chunk;
            }
        }

        added_sectors = MIN(added_sectors, end - (sector_num + nb_sectors));
        added_chunks = (added_sectors + sectors_per_chunk - 1) / sectors_per_chunk;

        /* When doing COW, it may happen that there is not enough space for
         * a full cluster.  Wait if that is the case.
         */
        while (nb_chunks == 0 && s->buf_free_count < added_chunks) {
            trace_mirror_yield_buf_busy(s, nb_chunks, s->in_flight);
            qemu_coroutine_yield();
        }
        if (s->buf_free_count < nb_chunks + added_chunks) {
            trace_mirror_break_buf_busy(s, nb_chunks, s->in_flight);
            break;
        }

        /* We have enough free space to copy these sectors.  */
        bitmap_set(s->in_flight_bitmap, next_chunk, added_chunks);

        nb_sectors += added_sectors;
        nb_chunks += added_chunks;
        next_sector += added_sectors;
        next_chunk += added_chunks;
        if (!s->synced && s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, added_sectors);
        }
    } while (delay_ns == 0 && next_sector < end);
    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_slice_new(MirrorOp);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    next_sector = sector_num;
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = (nb_sectors * BDRV_SECTOR_SIZE) - op->qiov.size;
        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));

        /* Advance the HBitmapIter in parallel, so that we do not examine
         * the same sector twice.
         */
        if (next_sector > hbitmap_next_sector
            && bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            hbitmap_next_sector = hbitmap_iter_next(&s->hbi);
        }

        next_sector += sectors_per_chunk;
    }

    bdrv_reset_dirty(source, sector_num, nb_sectors);

    /* Copy the dirty cluster.  */
    s->in_flight++;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);
    bdrv_aio_readv(source, sector_num, &op->qiov, nb_sectors,
                   mirror_read_complete, op);
    return delay_ns;
}
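
/* Carve s->buf into granularity-sized chunks and thread them onto the free
 * list.  As an illustration (the actual buffer size is chosen by the
 * caller, e.g. drive-mirror's 10 MiB default): with a 10 MiB buffer and
 * 64 KiB granularity the pool holds 160 chunks.
 */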
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}
static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        qemu_coroutine_yield();
    }
}
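
/* The job coroutine.  If the sync mode is not "none", a first pass marks
 * every sector allocated above s->base as dirty; the main loop then copies
 * dirty chunks, flushes the target once the source is clean, emits
 * BLOCK_JOB_READY, and keeps mirroring new writes until cancellation or
 * block-job-complete sets should_complete.
 */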
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    BlockDriverState *bs = s->common.bs;
    int64_t sector_num, end, sectors_per_chunk, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[1024];
    int ret = 0;
    int n;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->common.len = bdrv_getlength(bs);
    if (s->common.len < 0) {
        ret = s->common.len;
        goto immediate_exit;
    } else if (s->common.len == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }
    length = DIV_ROUND_UP(s->common.len, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(s->target, backing_filename,
                              sizeof(backing_filename));
    if (backing_filename[0] && !s->target->backing_hd) {
        ret = bdrv_get_info(s->target, &bdi);
        if (ret < 0) {
            goto immediate_exit;
        }
        if (s->granularity < bdi.cluster_size) {
            s->buf_size = MAX(s->buf_size, bdi.cluster_size);
            s->cow_bitmap = bitmap_new(length);
        }
    }

    end = s->common.len >> BDRV_SECTOR_BITS;
    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    mirror_free_init(s);
    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap.  */
        BlockDriverState *base = s->base;
        for (sector_num = 0; sector_num < end; ) {
            int64_t next = (sector_num | (sectors_per_chunk - 1)) + 1;
            ret = bdrv_is_allocated_above(bs, base,
                                          sector_num, next - sector_num, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1) {
                bdrv_set_dirty(bs, sector_num, n);
                sector_num = next;
            } else {
                sector_num += n;
            }
        }
    }
    bdrv_dirty_iter_init(bs, s->dirty_bitmap, &s->hbi);
    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that qemu_aio_flush() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                qemu_coroutine_yield();
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
                if (delay_ns == 0) {
                    continue;
                }
            }
        }
        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = bdrv_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                s->common.offset = end * BDRV_SECTOR_SIZE;
                if (!s->synced) {
                    block_job_event_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
            }
        }
        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_drain_all();
            cnt = bdrv_get_dirty_count(bs, s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            /* Publish progress */
            s->common.offset = (end - cnt) * BDRV_SECTOR_SIZE;
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }
immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
    bdrv_iostatus_disable(s->target);
    if (s->should_complete && ret == 0) {
        BlockDriverState *to_replace = s->common.bs;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }
        if (bdrv_get_flags(s->target) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(s->target, bdrv_get_flags(to_replace), NULL);
        }
        bdrv_swap(s->target, to_replace);
        if (s->common.driver->job_type == BLOCK_JOB_TYPE_COMMIT) {
            /* drop the bs loop chain formed by the swap: break the loop then
             * trigger the unref from the top one */
            BlockDriverState *p = s->base->backing_hd;
            bdrv_set_backing_hd(s->base, NULL);
            bdrv_unref(p);
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    g_free(s->replaces);
    bdrv_unref(s->target);
    block_job_completed(&s->common, ret);
}
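
/* speed is in bytes per second; the rate limiter counts sectors, hence the
 * division by BDRV_SECTOR_SIZE.  For example, a 10 MiB/s limit becomes
 * 20480 sectors per second, metered out in SLICE_TIME (100 ms) slices.
 */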
static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_set(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}
static void mirror_iostatus_reset(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    bdrv_iostatus_reset(s->target);
}
static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    Error *local_err = NULL;
    int ret;

    ret = bdrv_open_backing_file(s->target, NULL, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return;
    }
    if (!s->synced) {
        error_set(errp, QERR_BLOCK_JOB_NOT_READY,
                  bdrv_get_device_name(job->bs));
        return;
    }

    /* check the target bs is not blocked and block all operations on it */
    if (s->replaces) {
        s->to_replace = check_to_replace_node(s->replaces, &local_err);
        if (!s->to_replace) {
            error_propagate(errp, local_err);
            return;
        }

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);
    }

    s->should_complete = true;
    block_job_resume(job);
}
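
/* Both job drivers share the same callbacks: active commit reuses the whole
 * mirror machinery with the base image as the target, and differs only in
 * its job_type (which mirror_run() checks to unhook the backing chain after
 * bdrv_swap()).
 */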
static const BlockJobDriver mirror_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_MIRROR,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};
static const BlockJobDriver commit_active_job_driver = {
    .instance_size  = sizeof(MirrorBlockJob),
    .job_type       = BLOCK_JOB_TYPE_COMMIT,
    .set_speed      = mirror_set_speed,
    .iostatus_reset = mirror_iostatus_reset,
    .complete       = mirror_complete,
};
static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             const char *replaces,
                             int64_t speed, int64_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        /* Choose the default granularity based on the target file's cluster
         * size, clamped between 4k and 64k.  */
        BlockDriverInfo bdi;
        if (bdrv_get_info(target, &bdi) >= 0 && bdi.cluster_size != 0) {
            granularity = MAX(4096, bdi.cluster_size);
            granularity = MIN(65536, granularity);
        } else {
            granularity = 65536;
        }
    }

    assert((granularity & (granularity - 1)) == 0);

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        !bdrv_iostatus_is_enabled(bs)) {
        error_set(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->target = target;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = MAX(buf_size, granularity);

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, errp);
    if (!s->dirty_bitmap) {
        return;
    }
    bdrv_set_enable_write_cache(s->target, true);
    bdrv_set_on_error(s->target, on_target_error, on_target_error);
    bdrv_iostatus_enable(s->target);
    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}
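
/* Map the sync mode onto mirror_start_job() arguments: "none" skips the
 * initial copy pass entirely, "top" copies only sectors allocated in bs
 * itself (base = bs->backing_hd), and "full" copies everything allocated
 * anywhere in the chain (base = NULL).
 */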
void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, int64_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bs->backing_hd : NULL;
    mirror_start_job(bs, target, replaces,
                     speed, granularity, buf_size,
                     on_source_error, on_target_error, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}
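
/* Active commit: run the mirror machinery with the base image as the
 * target.  The base is temporarily reopened with the top's flags (so it is
 * writable) and grown if the top image is larger; on any setup failure the
 * original flags are restored.
 */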
void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    bdrv_ref(base);
    mirror_start_job(bs, base, NULL, speed, 0, 0,
                     on_error, on_error, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}