[qemu.git] / migration / block.c
/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/hw.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08
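
/* Layout of the migration stream, as written by blk_send() and parsed by
 * block_load() below: each chunk starts with one big-endian 64-bit word that
 * carries a sector number (or a percentage, for PROGRESS chunks) shifted left
 * by BDRV_SECTOR_BITS, with the flag bits above OR'ed into the low bits.
 * A DEVICE_BLOCK chunk is followed by a one-byte device-name length, the name
 * itself and, unless ZERO_BLOCK is also set, BLOCK_SIZE bytes of data.
 * An EOS chunk terminates the data of one section.
 */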
#define MAX_IS_ALLOCATED_SEARCH 65536

#define MAX_INFLIGHT_IO 512

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockDriverState *bs;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock.  */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    /* Written during setup phase.  Can be read without a lock.  */
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(bdrv_get_device_name(blk->bmds->bs));
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)bdrv_get_device_name(blk->bmds->bs), len);

    /* if a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * thus if we queue zero blocks we slow down the migration */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}


/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < bdrv_nb_sectors(bmds->bs)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}
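
/* The aio_bitmap tracks, with one bit per BDRV_SECTORS_PER_DIRTY_CHUNK-sized
 * chunk, whether an asynchronous read for that chunk is still in flight.  The
 * size computed below is the sector count divided by (chunk size * 8 bits),
 * rounded up to whole bytes; g_malloc0() starts with no chunk marked busy.
 */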
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    bitmap_size = bdrv_nb_sectors(bs) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken.  */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(bdrv_get_aio_context(bs));
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        aio_context_release(bdrv_get_aio_context(bs));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane.  Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(bdrv_get_aio_context(bmds->bs));
    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);

    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector, nr_sectors);
    aio_context_release(bdrv_get_aio_context(bmds->bs));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken.  */

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE,
                                                      NULL, NULL);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            aio_context_acquire(bdrv_get_aio_context(bmds->bs));
            bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
            aio_context_release(bdrv_get_aio_context(bmds->bs));
        }
    }
    return ret;
}

/* Called with iothread lock taken.  */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
    }
}
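
/* Enumerate every writable block device and queue a BlkMigDevState for it.
 * Each device is referenced and blocked against conflicting operations for
 * the duration of the migration (both undone in block_migration_cleanup).
 */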
static void init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator *it = NULL;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    while ((it = bdrv_next(it, &bs))) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            return;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->bs = bs;
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;
        alloc_aio_bitmap(bmds);
        error_setg(&bmds->blocker, "block device is in use by migration");
        bdrv_op_block_all(bs, bmds->blocker);
        bdrv_ref(bs);

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }
}

/* Called with no lock taken.  */
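/* Returns 1 while at least one device still has bulk data left to send and
 * 0 once every device has finished its bulk phase.  A PROGRESS chunk is
 * emitted whenever the overall completion percentage changes.
 */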
static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock and AioContext taken.  */
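/* Sends (synchronously) or queues (when is_async is set) at most one dirty
 * chunk starting from bmds->cur_dirty.  Returns 1 once the cursor has reached
 * the end of the device, 0 otherwise, or a negative value on read error.
 */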
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            bdrv_drain(bmds->bs);
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bmds->bs, bmds->dirty_bitmap, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: few enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken.  */
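/* Pops completed reads off blk_list and writes them to the stream until the
 * list is empty, the rate limit is reached, or a read reported an error.  The
 * block migration lock is dropped around each blk_send() call.
 */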
static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken.  */

static void block_migration_cleanup(void *opaque)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;
    AioContext *ctx;

    bdrv_drain_all();

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(bmds->bs, bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->bs can disappear during bdrv_unref.  */
        ctx = bdrv_get_aio_context(bmds->bs);
        aio_context_acquire(ctx);
        bdrv_unref(bmds->bs);
        aio_context_release(ctx);

        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    init_blk_migration(f);

    /* start track dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
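    /* Keep queueing new reads while the data in flight (submitted plus
     * completed-but-unsent chunks, BLOCK_SIZE bytes each) stays below both
     * the file's rate limit and MAX_INFLIGHT_IO chunks.
     */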
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           (block_mig_state.submitted +
            block_mig_state.read_done) <
           MAX_INFLIGHT_IO) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that save bulk is completed and
       all async read completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return 0;
}

static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                               uint64_t *non_postcopiable_pending,
                               uint64_t *postcopiable_pending)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLOCK_SIZE +
               block_mig_state.read_done * BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }

    DPRINTF("Enter save live pending  %" PRIu64 "\n", pending);
    /* We don't do postcopy */
    *non_postcopiable_pending += pending;
}
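
/* Incoming side: consume chunks in the format produced by blk_send() until an
 * EOS chunk arrives.  DEVICE_BLOCK chunks are written to the named device at
 * the given sector (as zeroes when ZERO_BLOCK is set); PROGRESS chunks only
 * update the console output.
 */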
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs, *bs_prev = NULL;
    BlockBackend *blk;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }
            bs = blk_bs(blk);
            if (!bs) {
                fprintf(stderr, "Block device %s has no medium\n",
                        device_name);
                return -EINVAL;
            }

            if (bs != bs_prev) {
                bs_prev = bs;
                total_sectors = bdrv_nb_sectors(bs);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                bdrv_invalidate_cache(bs, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = bdrv_write_zeroes(bs, addr, nr_sectors,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                ret = bdrv_write(bs, addr, buf, nr_sectors);
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(const MigrationParams *params, void *opaque)
{
    block_mig_state.blk_enable = params->blk;
    block_mig_state.shared_base = params->shared;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= params->shared;
}

static bool block_is_active(void *opaque)
{
    return block_mig_state.blk_enable == 1;
}

static SaveVMHandlers savevm_block_handlers = {
    .set_params = block_set_params,
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};
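
/* Initialise the shared state and register the handlers above as the "block"
 * savevm section, so that block migration takes part in live migration
 * whenever block_is_active() reports it as enabled.
 */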
void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}