block migration: Fix outgoing progress output
[qemu.git] / block-migration.c
blob 22d10f03c6f7ca679f7dc150ec5fa68b7a72a4ff

/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "block_int.h"
#include "hw/hw.h"
#include "qemu-queue.h"
#include "monitor.h"
#include "block-migration.h"
#include <assert.h>

#define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02

#define MAX_IS_ALLOCATED_SEARCH 65536
#define MAX_BLOCKS_READ 10000
#define BLOCKS_READ_CHANGE 100
#define INITIAL_BLOCKS_READ 100

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define dprintf(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif
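
/* Per-device migration state: where the bulk copy has got to in the
 * device's sectors and how much has been completed so far. */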
typedef struct BlkMigDevState {
    BlockDriverState *bs;
    int bulk_completed;
    int shared_base;
    int64_t cur_sector;
    int64_t completed_sectors;
    int64_t total_sectors;
    int64_t dirty;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
} BlkMigDevState;
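
/* One block in flight: the data buffer plus the AIO bookkeeping needed
 * to send it to the migration stream once the read completes. */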
typedef struct BlkMigBlock {
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;
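
/* Global migration state: the device list, the queue of completed reads
 * awaiting transfer, and counters used for rate limiting and progress
 * output. */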
typedef struct BlkMigState {
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;
    int transferred;
    int64_t total_sector_sum;
    int64_t print_completion;
} BlkMigState;

static BlkMigState block_mig_state;
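
/* Write one block to the migration stream: the sector number ORed with
 * the flags, the length-prefixed device name, then BLOCK_SIZE bytes of
 * payload. */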
static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int len;

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}
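
/* AIO completion callback: queue the finished read so that flush_blks()
 * can send it, and update the in-flight counters. */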
static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
}
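
/* Read the next bulk block of one device, either asynchronously or
 * synchronously. With a shared base image, sectors that are unallocated
 * (i.e. backed by the base) are skipped. Returns 1 once the whole
 * device has been transferred. */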
static int mig_save_device_bulk(Monitor *mon, QEMUFile *f,
                                BlkMigDevState *bmds, int is_async)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = qemu_malloc(sizeof(BlkMigBlock));
    blk->buf = qemu_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;

    if (is_async) {
        blk->iov.iov_base = blk->buf;
        blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
        qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

        blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                    nr_sectors, blk_mig_read_cb, blk);
        if (!blk->aiocb) {
            goto error;
        }
        block_mig_state.submitted++;
    } else {
        if (bdrv_read(bs, cur_sector, blk->buf, nr_sectors) < 0) {
            goto error;
        }
        blk_send(f, blk);

        qemu_free(blk->buf);
        qemu_free(blk);
    }

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    bmds->cur_sector = cur_sector + nr_sectors;

    return (bmds->cur_sector >= total_sectors);

error:
    monitor_printf(mon, "Error reading sector %" PRId64 "\n", cur_sector);
    qemu_file_set_error(f);
    qemu_free(blk->buf);
    qemu_free(blk);
    return 0;
}
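
/* Switch dirty-sector tracking on or off for every migrated device. */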
static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_set_dirty_tracking(bmds->bs, enable);
    }
}
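
/* Reset the global counters and build a BlkMigDevState for every
 * hard-disk device, accumulating the total sector count so that progress
 * can be reported as a percentage. */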
static void init_blk_migration(Monitor *mon, QEMUFile *f)
{
    BlkMigDevState *bmds;
    BlockDriverState *bs;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.print_completion = 0;

    for (bs = bdrv_first; bs != NULL; bs = bs->next) {
        if (bs->type == BDRV_TYPE_HD) {
            bmds = qemu_mallocz(sizeof(BlkMigDevState));
            bmds->bs = bs;
            bmds->bulk_completed = 0;
            bmds->total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
            bmds->completed_sectors = 0;
            bmds->shared_base = block_mig_state.shared_base;

            block_mig_state.total_sector_sum += bmds->total_sectors;

            if (bmds->shared_base) {
                monitor_printf(mon, "Start migration for %s with shared base "
                                    "image\n",
                               bs->device_name);
            } else {
                monitor_printf(mon, "Start full migration for %s\n",
                               bs->device_name);
            }

            QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
        }
    }
}
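
/* Transfer one bulk block from the first device that still has work and
 * print the overall completion percentage. Returns 0 once every device
 * has finished its bulk phase. */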
static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f, int is_async)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(mon, f, bmds, is_async) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (completed_sector_sum >= block_mig_state.print_completion) {
        monitor_printf(mon, "Completed %" PRId64 " %%\r",
                       completed_sector_sum * 100 /
                       block_mig_state.total_sector_sum);
        monitor_flush(mon);
        block_mig_state.print_completion +=
            (BDRV_SECTORS_PER_DIRTY_CHUNK * 10000);
    }

    return ret;
}

#define MAX_NUM_BLOCKS 4
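
/* Synchronously send every block that was dirtied while the bulk phase
 * was in progress; called during the final stage of migration. */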
static void blk_mig_save_dirty_blocks(Monitor *mon, QEMUFile *f)
{
    BlkMigDevState *bmds;
    BlkMigBlock blk;
    int64_t sector;

    blk.buf = qemu_malloc(BLOCK_SIZE);

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        for (sector = 0; sector < bmds->cur_sector;) {
            if (bdrv_get_dirty(bmds->bs, sector)) {
                if (bdrv_read(bmds->bs, sector, blk.buf,
                              BDRV_SECTORS_PER_DIRTY_CHUNK) < 0) {
                    monitor_printf(mon, "Error reading sector %" PRId64 "\n",
                                   sector);
                    qemu_file_set_error(f);
                    qemu_free(blk.buf);
                    return;
                }
                blk.bmds = bmds;
                blk.sector = sector;
                blk_send(f, &blk);

                bdrv_reset_dirty(bmds->bs, sector,
                                 BDRV_SECTORS_PER_DIRTY_CHUNK);
            }
            sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        }
    }

    qemu_free(blk.buf);
}
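
/* Drain completed reads into the migration stream, stopping early when
 * the rate limit is reached or the stream has seen an error. */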
static void flush_blks(QEMUFile* f)
{
    BlkMigBlock *blk;

    dprintf("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            qemu_file_set_error(f);
            break;
        }
        blk_send(f, blk);

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        qemu_free(blk->buf);
        qemu_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }

    dprintf("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
}
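
/* Stage 2 is complete when no reads are in flight and every device has
 * finished its bulk transfer. */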
static int is_stage2_completed(void)
{
    BlkMigDevState *bmds;

    if (block_mig_state.submitted > 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            return 0;
        }
    }

    return 1;
}
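
/* Release all per-device and per-block state and stop dirty tracking. */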
static void blk_mig_cleanup(Monitor *mon)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        qemu_free(bmds);
    }

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        qemu_free(blk->buf);
        qemu_free(blk);
    }

    set_dirty_tracking(0);

    monitor_printf(mon, "\n");
}
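
/* Live-save handler: stage 1 sets up the device list and dirty tracking,
 * stage 2 streams bulk blocks within the bandwidth limit, and stage 3
 * finishes the bulk phase and sends the remaining dirty blocks. */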
static int block_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
{
    dprintf("Enter save live stage %d submitted %d transferred %d\n",
            stage, block_mig_state.submitted, block_mig_state.transferred);

    if (stage < 0) {
        blk_mig_cleanup(mon);
        return 0;
    }

    if (block_mig_state.blk_enable != 1) {
        /* no need to migrate storage */
        qemu_put_be64(f, BLK_MIG_FLAG_EOS);
        return 1;
    }

    if (stage == 1) {
        init_blk_migration(mon, f);

        /* start track dirty blocks */
        set_dirty_tracking(1);
    }

    flush_blks(f);

    if (qemu_file_has_error(f)) {
        blk_mig_cleanup(mon);
        return 0;
    }

    /* control the rate of transfer */
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        if (blk_mig_save_bulked_block(mon, f, 1) == 0) {
            /* no more bulk blocks for now */
            break;
        }
    }

    flush_blks(f);

    if (qemu_file_has_error(f)) {
        blk_mig_cleanup(mon);
        return 0;
    }

    if (stage == 3) {
        while (blk_mig_save_bulked_block(mon, f, 0) != 0) {
            /* empty */
        }

        blk_mig_save_dirty_blocks(mon, f);
        blk_mig_cleanup(mon);

        if (qemu_file_has_error(f)) {
            return 0;
        }

        monitor_printf(mon, "Block migration completed\n");
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ((stage == 2) && is_stage2_completed());
}
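
/* Incoming side: read (sector, flags, device name, payload) records and
 * write each block to the named device until the EOS flag is seen. */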
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs;
    uint8_t *buf;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            buf = qemu_malloc(BLOCK_SIZE);

            qemu_get_buffer(f, buf, BLOCK_SIZE);
            bdrv_write(bs, addr, buf, BDRV_SECTORS_PER_DIRTY_CHUNK);

            qemu_free(buf);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown flags\n");
            return -EINVAL;
        }
        if (qemu_file_has_error(f)) {
            return -EIO;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}
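
/* Record the block-migration parameters; requesting a shared base
 * implies that block migration itself is enabled. */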
static void block_set_params(int blk_enable, int shared_base, void *opaque)
{
    block_mig_state.blk_enable = blk_enable;
    block_mig_state.shared_base = shared_base;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= shared_base;
}
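
/* Register the live savevm handlers for the "block" section. */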
void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);

    register_savevm_live("block", 0, 1, block_set_params, block_save_live,
                         NULL, block_load, &block_mig_state);
}