block migration: Report progress also via info migration
[qemu/aliguori-queue.git] / block-migration.c
/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "qemu-common.h"
#include "block_int.h"
#include "hw/hw.h"
#include "qemu-queue.h"
#include "monitor.h"
#include "block-migration.h"
#include <assert.h>

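/* A migration "block" is one dirty-bitmap chunk: BDRV_SECTORS_PER_DIRTY_CHUNK
 * sectors of BDRV_SECTOR_SIZE bytes each, sent as a single unit on the wire. */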
#define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02

#define MAX_IS_ALLOCATED_SEARCH 65536
#define MAX_BLOCKS_READ 10000
#define BLOCKS_READ_CHANGE 100
#define INITIAL_BLOCKS_READ 100

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define dprintf(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

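/* Per-device migration state, one per migrated drive, linked into
 * block_mig_state.bmds_list. */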
typedef struct BlkMigDevState {
    BlockDriverState *bs;
    int bulk_completed;
    int shared_base;
    int64_t cur_sector;
    int64_t completed_sectors;
    int64_t total_sectors;
    int64_t dirty;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
} BlkMigDevState;

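/* One block in flight: its data buffer, owning device, and the AIO request
 * that fills it. Completed blocks sit on block_mig_state.blk_list until
 * flush_blks() sends them. */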
typedef struct BlkMigBlock {
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

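/* Global migration state: the device list, the queue of completed reads,
 * and counters used for rate limiting and progress reporting. */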
typedef struct BlkMigState {
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;
    int transferred;
    int64_t total_sector_sum;
    int64_t print_completion;
} BlkMigState;

static BlkMigState block_mig_state;

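/* Wire format per block: a 64-bit word holding (sector << BDRV_SECTOR_BITS)
 * OR'ed with flags, a one-byte device name length, the device name, and
 * BLOCK_SIZE bytes of data. block_load() below parses the same layout. */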
static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int len;

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

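/* blk_mig_active() and the byte counters below are exported to the migration
 * core; per this commit's subject line, the totals also feed the progress
 * shown by "info migrate". */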
int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

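/* Completion callback for bdrv_aio_readv(): records the result, queues the
 * block for sending, and moves it from "submitted" to "read_done". */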
static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
}

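/* Read one chunk of the bulk (first-pass) phase for a device. Returns 1 when
 * the device's bulk transfer is complete. With a shared base image, chunks
 * that are unallocated in the top image are skipped. */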
static int mig_save_device_bulk(Monitor *mon, QEMUFile *f,
                                BlkMigDevState *bmds, int is_async)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = qemu_malloc(sizeof(BlkMigBlock));
    blk->buf = qemu_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;

    if (is_async) {
        blk->iov.iov_base = blk->buf;
        blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
        qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

        blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                    nr_sectors, blk_mig_read_cb, blk);
        if (!blk->aiocb) {
            goto error;
        }
        block_mig_state.submitted++;
    } else {
        if (bdrv_read(bs, cur_sector, blk->buf, nr_sectors) < 0) {
            goto error;
        }
        blk_send(f, blk);

        qemu_free(blk->buf);
        qemu_free(blk);
    }

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    bmds->cur_sector = cur_sector + nr_sectors;

    return (bmds->cur_sector >= total_sectors);

error:
    monitor_printf(mon, "Error reading sector %" PRId64 "\n", cur_sector);
    qemu_file_set_error(f);
    qemu_free(blk->buf);
    qemu_free(blk);
    return 0;
}

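/* Enable or disable the block driver's dirty bitmap on every migrated
 * device. */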
static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_set_dirty_tracking(bmds->bs, enable);
    }
}

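/* Stage 1 setup: reset the counters and register every hard-disk drive
 * (BDRV_TYPE_HD) for migration. */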
static void init_blk_migration(Monitor *mon, QEMUFile *f)
{
    BlkMigDevState *bmds;
    BlockDriverState *bs;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.print_completion = 0;

    for (bs = bdrv_first; bs != NULL; bs = bs->next) {
        if (bs->type == BDRV_TYPE_HD) {
            bmds = qemu_mallocz(sizeof(BlkMigDevState));
            bmds->bs = bs;
            bmds->bulk_completed = 0;
            bmds->total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
            bmds->completed_sectors = 0;
            bmds->shared_base = block_mig_state.shared_base;

            block_mig_state.total_sector_sum += bmds->total_sectors;

            if (bmds->shared_base) {
                monitor_printf(mon, "Start migration for %s with shared base "
                                    "image\n",
                               bs->device_name);
            } else {
                monitor_printf(mon, "Start full migration for %s\n",
                               bs->device_name);
            }

            QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
        }
    }
}

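/* Advance the bulk phase by one chunk on the first device that still has
 * work, and print a completion percentage roughly every 10000 chunks.
 * Returns 0 once all devices have finished their bulk section. */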
static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f, int is_async)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(mon, f, bmds, is_async) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (completed_sector_sum >= block_mig_state.print_completion) {
        monitor_printf(mon, "Completed %" PRId64 " %%\r",
                       completed_sector_sum * 100 /
                       block_mig_state.total_sector_sum);
        monitor_flush(mon);
        block_mig_state.print_completion +=
            (BDRV_SECTORS_PER_DIRTY_CHUNK * 10000);
    }

    return ret;
}

#define MAX_NUM_BLOCKS 4

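/* Final (stage 3) pass: synchronously send every chunk dirtied by the guest
 * since its bulk transfer, reusing one stack-allocated BlkMigBlock. */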
static void blk_mig_save_dirty_blocks(Monitor *mon, QEMUFile *f)
{
    BlkMigDevState *bmds;
    BlkMigBlock blk;
    int64_t sector;

    blk.buf = qemu_malloc(BLOCK_SIZE);

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        for (sector = 0; sector < bmds->cur_sector;) {
            if (bdrv_get_dirty(bmds->bs, sector)) {
                if (bdrv_read(bmds->bs, sector, blk.buf,
                              BDRV_SECTORS_PER_DIRTY_CHUNK) < 0) {
                    monitor_printf(mon, "Error reading sector %" PRId64 "\n",
                                   sector);
                    qemu_file_set_error(f);
                    qemu_free(blk.buf);
                    return;
                }
                blk.bmds = bmds;
                blk.sector = sector;
                blk_send(f, &blk);

                bdrv_reset_dirty(bmds->bs, sector,
                                 BDRV_SECTORS_PER_DIRTY_CHUNK);
            }
            sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        }
    }

    qemu_free(blk.buf);
}

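/* Drain the queue of completed reads onto the wire, stopping early if the
 * rate limit is hit or an I/O error was recorded. */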
static void flush_blks(QEMUFile* f)
{
    BlkMigBlock *blk;

    dprintf("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            qemu_file_set_error(f);
            break;
        }
        blk_send(f, blk);

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        qemu_free(blk->buf);
        qemu_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }

    dprintf("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
}

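/* Stage 2 is done when no reads are in flight and every device has finished
 * its bulk section; remaining dirty chunks are then sent in stage 3. */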
static int is_stage2_completed(void)
{
    BlkMigDevState *bmds;

    if (block_mig_state.submitted > 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            return 0;
        }
    }

    return 1;
}

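/* Free all per-device and per-block state and stop dirty tracking; called on
 * completion, error, or cancellation (stage < 0). */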
static void blk_mig_cleanup(Monitor *mon)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        qemu_free(bmds);
    }

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        qemu_free(blk->buf);
        qemu_free(blk);
    }

    set_dirty_tracking(0);

    monitor_printf(mon, "\n");
}

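/* Live-save handler registered via register_savevm_live(): stage 1
 * initializes state and starts dirty tracking, stage 2 iterates under the
 * bandwidth limit, stage 3 sends the remaining bulk and dirty chunks, and a
 * negative stage cancels the migration. */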
static int block_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
{
    dprintf("Enter save live stage %d submitted %d transferred %d\n",
            stage, block_mig_state.submitted, block_mig_state.transferred);

    if (stage < 0) {
        blk_mig_cleanup(mon);
        return 0;
    }

    if (block_mig_state.blk_enable != 1) {
        /* no need to migrate storage */
        qemu_put_be64(f, BLK_MIG_FLAG_EOS);
        return 1;
    }

    if (stage == 1) {
        init_blk_migration(mon, f);

        /* start track dirty blocks */
        set_dirty_tracking(1);
    }

    flush_blks(f);

    if (qemu_file_has_error(f)) {
        blk_mig_cleanup(mon);
        return 0;
    }

    /* control the rate of transfer */
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f)) {
        if (blk_mig_save_bulked_block(mon, f, 1) == 0) {
            /* no more bulk blocks for now */
            break;
        }
    }

    flush_blks(f);

    if (qemu_file_has_error(f)) {
        blk_mig_cleanup(mon);
        return 0;
    }

    if (stage == 3) {
        while (blk_mig_save_bulked_block(mon, f, 0) != 0) {
            /* empty */
        }

        blk_mig_save_dirty_blocks(mon, f);
        blk_mig_cleanup(mon);

        if (qemu_file_has_error(f)) {
            return 0;
        }

        monitor_printf(mon, "Block migration completed\n");
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ((stage == 2) && is_stage2_completed());
}

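/* Destination side: replay the stream written by blk_send(), chunk by chunk,
 * until the EOS flag is seen. */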
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs;
    uint8_t *buf;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            buf = qemu_malloc(BLOCK_SIZE);

            qemu_get_buffer(f, buf, BLOCK_SIZE);
            bdrv_write(bs, addr, buf, BDRV_SECTORS_PER_DIRTY_CHUNK);

            qemu_free(buf);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown flags\n");
            return -EINVAL;
        }
        if (qemu_file_has_error(f)) {
            return -EIO;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

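/* Called from the migration core with the block-migration options (presumably
 * the monitor's "migrate -b" / "-i" flags); shared_base implies blk_enable,
 * since an incremental migration still moves blocks. */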
static void block_set_params(int blk_enable, int shared_base, void *opaque)
{
    block_mig_state.blk_enable = blk_enable;
    block_mig_state.shared_base = shared_base;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= shared_base;
}

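/* Startup hook: registers the "block" live-save section (version 1) with the
 * handlers above. */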
void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);

    register_savevm_live("block", 0, 1, block_set_params, block_save_live,
                         NULL, block_load, &block_mig_state);
}