/* migration/migration.c */
/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/blocker.h"
#include "exec.h"
#include "fd.h"
#include "socket.h"
#include "rdma.h"
#include "ram.h"
#include "migration/migration.h"
#include "savevm.h"
#include "qemu-file-channel.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
#include "qapi/util.h"
#include "qemu/sockets.h"
#include "qemu/rcu.h"
#include "block.h"
#include "postcopy-ram.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"
#include "qapi-event.h"
#include "qom/cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/target_page.h"
#include "io/channel-buffer.h"
#include "migration/colo.h"
#define MAX_THROTTLE  (32 << 20)      /* Migration transfer speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)

/* Time in milliseconds we are allowed to stop the source,
 * for sending the last part */
#define DEFAULT_MIGRATE_SET_DOWNTIME 300

/* Maximum migrate downtime set to 2000 seconds */
#define MAX_MIGRATE_DOWNTIME_SECONDS 2000
#define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression. */
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: no compression, 1: best speed, ..., 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Define default autoconverge cpu throttle migration parameters */
#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

/* The delay time (in ms) between two COLO checkpoints
 * Note: Please change this default value to 10000 when we support hybrid mode.
 */
#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY 200
static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */
/* For outgoing */
MigrationState *migrate_get_current(void)
{
    static bool once;
    static MigrationState current_migration = {
        .state = MIGRATION_STATUS_NONE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
        .mbps = -1,
        .parameters = {
            .compress_level = DEFAULT_MIGRATE_COMPRESS_LEVEL,
            .compress_threads = DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
            .decompress_threads = DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
            .cpu_throttle_initial = DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL,
            .cpu_throttle_increment = DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT,
            .max_bandwidth = MAX_THROTTLE,
            .downtime_limit = DEFAULT_MIGRATE_SET_DOWNTIME,
            .x_checkpoint_delay = DEFAULT_MIGRATE_X_CHECKPOINT_DELAY,
        },
    };

    if (!once) {
        current_migration.parameters.tls_creds = g_strdup("");
        current_migration.parameters.tls_hostname = g_strdup("");
        once = true;
    }

    return &current_migration;
}
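
/*
 * Incoming counterpart of migrate_get_current(): a lazily initialised
 * per-process singleton holding the state of the incoming side.
 */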
MigrationIncomingState *migration_incoming_get_current(void)
{
    static bool once;
    static MigrationIncomingState mis_current;

    if (!once) {
        /* Zero the whole structure first, then set the initial state */
        memset(&mis_current, 0, sizeof(MigrationIncomingState));
        mis_current.state = MIGRATION_STATUS_NONE;
        qemu_mutex_init(&mis_current.rp_mutex);
        qemu_event_init(&mis_current.main_thread_load_event, false);
        once = true;
    }
    return &mis_current;
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->to_src_file) {
        /* Tell source that we are done */
        migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
        qemu_fclose(mis->to_src_file);
        mis->to_src_file = NULL;
    }

    if (mis->from_src_file) {
        qemu_fclose(mis->from_src_file);
        mis->from_src_file = NULL;
    }

    qemu_event_destroy(&mis->main_thread_load_event);
}

typedef struct {
    bool optional;
    uint32_t size;
    uint8_t runstate[100];
    RunState state;
    bool received;
} GlobalState;

static GlobalState global_state;

int global_state_store(void)
{
    if (!runstate_store((char *)global_state.runstate,
                        sizeof(global_state.runstate))) {
        error_report("runstate name too big: %s", global_state.runstate);
        trace_migrate_state_too_big();
        return -EINVAL;
    }
    return 0;
}
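
/*
 * Unconditionally record "running" as the stored runstate, rather than
 * sampling the current one (so the destination sees a running state
 * regardless of when the source VM was stopped).
 */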
void global_state_store_running(void)
{
    const char *state = RunState_lookup[RUN_STATE_RUNNING];
    strncpy((char *)global_state.runstate,
            state, sizeof(global_state.runstate));
}

static bool global_state_received(void)
{
    return global_state.received;
}

static RunState global_state_get_runstate(void)
{
    return global_state.state;
}

void global_state_set_optional(void)
{
    global_state.optional = true;
}

static bool global_state_needed(void *opaque)
{
    GlobalState *s = opaque;
    char *runstate = (char *)s->runstate;

    /* If it is not optional, it is mandatory */

    if (s->optional == false) {
        return true;
    }

    /* If state is running or paused, it is not needed */

    if (strcmp(runstate, "running") == 0 ||
        strcmp(runstate, "paused") == 0) {
        return false;
    }

    /* for any other state it is needed */
    return true;
}

static int global_state_post_load(void *opaque, int version_id)
{
    GlobalState *s = opaque;
    Error *local_err = NULL;
    int r;
    char *runstate = (char *)s->runstate;

    s->received = true;
    trace_migrate_global_state_post_load(runstate);

    r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE__MAX,
                        -1, &local_err);

    if (r == -1) {
        if (local_err) {
            error_report_err(local_err);
        }
        return -EINVAL;
    }
    s->state = r;

    return 0;
}

static void global_state_pre_save(void *opaque)
{
    GlobalState *s = opaque;

    trace_migrate_global_state_pre_save((char *)s->runstate);
    s->size = strlen((char *)s->runstate) + 1;
}

static const VMStateDescription vmstate_globalstate = {
    .name = "globalstate",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = global_state_post_load,
    .pre_save = global_state_pre_save,
    .needed = global_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(size, GlobalState),
        VMSTATE_BUFFER(runstate, GlobalState),
        VMSTATE_END_OF_LIST()
    },
};

void register_global_state(void)
{
    /* We will use this buffer whether or not a global state section is
     * received */
    strcpy((char *)&global_state.runstate, "");
    global_state.received = false;
    vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state, &error_abort);
    }
}

/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    deferred_incoming = true;
}

/* Request a range of pages from the source VM at the given
 * start address.
 *   rbname: Name of the RAMBlock to request the page in, if NULL it's the same
 *           as the last request (a name must have been given previously)
 *   Start: Address offset within the RB
 *   Len: Length in bytes required - must be a multiple of pagesize
 */
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
                               ram_addr_t start, size_t len)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    if (rbname) {
        int rbname_len = strlen(rbname);
        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
    } else {
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
    }
}
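
/*
 * Parse @uri and start listening on the matching transport; the special
 * URI "defer" just records that the real one will be supplied later via
 * the migrate-incoming command.
 */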
void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}
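
/*
 * Bottom half run on the main loop once the incoming stream has loaded:
 * re-activates block device metadata, announces the guest on the network,
 * picks the final runstate, and marks the migration COMPLETED.
 */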
static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* Make sure all file formats flush their mutable metadata.
     * If we get an error here, just don't restart the VM yet. */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        error_report_err(local_err);
        local_err = NULL;
        autostart = false;
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self();

    /* If global state section was not received or we are in running
       state, we need to obey autostart.  Any other state is set with
       runstate_set. */

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else {
        runstate_set(global_state_get_runstate());
    }
    migrate_decompress_threads_join();
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}
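
/*
 * Coroutine that consumes the incoming stream via qemu_loadvm_state()
 * and then routes to postcopy cleanup, the COLO incoming thread, or the
 * completion bottom half as appropriate.
 */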
static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;

    mis->from_src_file = f;
    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(f);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    /* we get COLO info, and know if we are in COLO mode */
    if (!ret && migration_incoming_enable_colo()) {
        mis->migration_incoming_co = qemu_coroutine_self();
        qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
             colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
        mis->have_colo_incoming_thread = true;
        qemu_coroutine_yield();

        /* Wait for the COLO incoming thread to exit before freeing resources */
        qemu_thread_join(&mis->colo_incoming_thread);
    }

    if (ret < 0) {
        migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        error_report("load of migration failed: %s", strerror(-ret));
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    free_xbzrle_decoded_buf();

    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
}

void migration_fd_process_incoming(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f);

    migrate_decompress_threads_create();
    qemu_file_set_blocking(f, false);
    qemu_coroutine_enter(co);
}
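
/*
 * Return-path wire format, as produced below: big-endian u16 message
 * type, big-endian u16 payload length, then 'len' bytes of payload.
 */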
/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
void migrate_send_rp_message(MigrationIncomingState *mis,
                             enum mig_rp_message_type message_type,
                             uint16_t len, void *data)
{
    trace_migrate_send_rp_message((int)message_type, len);
    qemu_mutex_lock(&mis->rp_mutex);
    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);
    qemu_mutex_unlock(&mis->rp_mutex);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
        if (i == MIGRATION_CAPABILITY_BLOCK) {
            continue;
        }
#endif
        if (i == MIGRATION_CAPABILITY_X_COLO && !colo_supported()) {
            continue;
        }
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value =
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->has_compress_level = true;
    params->compress_level = s->parameters.compress_level;
    params->has_compress_threads = true;
    params->compress_threads = s->parameters.compress_threads;
    params->has_decompress_threads = true;
    params->decompress_threads = s->parameters.decompress_threads;
    params->has_cpu_throttle_initial = true;
    params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
    params->has_cpu_throttle_increment = true;
    params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
    params->has_tls_creds = !!s->parameters.tls_creds;
    params->tls_creds = g_strdup(s->parameters.tls_creds);
    params->has_tls_hostname = !!s->parameters.tls_hostname;
    params->tls_hostname = g_strdup(s->parameters.tls_hostname);
    params->has_max_bandwidth = true;
    params->max_bandwidth = s->parameters.max_bandwidth;
    params->has_downtime_limit = true;
    params->downtime_limit = s->parameters.downtime_limit;
    params->has_x_checkpoint_delay = true;
    params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;
    params->has_block_incremental = true;
    params->block_incremental = s->parameters.block_incremental;

    return params;
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
static bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_SETUP:
        return true;

    default:
        return false;
    }
}

static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}

static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    info->has_ram = true;
    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = ram_bytes_transferred();
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = dup_mig_pages_transferred();
    /* legacy value.  It is not used anymore */
    info->ram->skipped = 0;
    info->ram->normal = norm_mig_pages_transferred();
    info->ram->normal_bytes = norm_mig_pages_transferred() *
        qemu_target_page_size();
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count = ram_dirty_sync_count();
    info->ram->postcopy_requests = ram_postcopy_requests();
    info->ram->page_size = qemu_target_page_size();

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate = ram_dirty_pages_rate();
    }
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        if (cpu_throttle_active()) {
            info->has_cpu_throttle_percentage = true;
            info->cpu_throttle_percentage = cpu_throttle_get_percentage();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
        /* Mostly the same as active; TODO add some postcopy stats */
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        populate_ram_info(info, s);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        if (s->error) {
            info->has_error_desc = true;
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}
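
/*
 * Apply a list of capability settings.  Capabilities that can't be
 * honoured here (compiled-out block migration, unsupported COLO,
 * postcopy combined with compression or without host support) are
 * reported and rejected or switched back off rather than left enabled.
 */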
void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;
    bool old_postcopy_cap = migrate_postcopy_ram();

    if (migration_is_setup_or_active(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
        if (cap->value->capability == MIGRATION_CAPABILITY_BLOCK
            && cap->value->state) {
            error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
                       "block migration");
            error_append_hint(errp, "Use drive_mirror+NBD instead.\n");
            continue;
        }
#endif
        if (cap->value->capability == MIGRATION_CAPABILITY_X_COLO) {
            if (!colo_supported()) {
                error_setg(errp, "COLO is not currently supported, please"
                    " configure with --enable-colo option in order to"
                    " support COLO feature");
                continue;
            }
        }
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }

    if (migrate_postcopy_ram()) {
        if (migrate_use_compression()) {
            /* The decompression threads asynchronously write into RAM
             * rather than use the atomic copies needed to avoid
             * userfaulting.  It should be possible to fix the decompression
             * threads for compatibility in future.
             */
            error_report("Postcopy is not currently compatible with "
                         "compression");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
        /* This check is reasonably expensive, so only when it's being
         * set the first time, also it's only the destination that needs
         * special support.
         */
        if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) &&
            !postcopy_ram_supported_by_host()) {
            /* postcopy_ram_supported_by_host will have emitted a more
             * detailed message
             */
            error_report("Postcopy is not supported");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
    }
}

void qmp_migrate_set_parameters(MigrationParameters *params, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (params->has_compress_level &&
        (params->compress_level < 0 || params->compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return;
    }
    if (params->has_compress_threads &&
        (params->compress_threads < 1 || params->compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (params->has_decompress_threads &&
        (params->decompress_threads < 1 || params->decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (params->has_cpu_throttle_initial &&
        (params->cpu_throttle_initial < 1 ||
         params->cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
        return;
    }
    if (params->has_cpu_throttle_increment &&
        (params->cpu_throttle_increment < 1 ||
         params->cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
        return;
    }
    if (params->has_max_bandwidth &&
        (params->max_bandwidth < 0 || params->max_bandwidth > SIZE_MAX)) {
        error_setg(errp, "Parameter 'max_bandwidth' expects an integer in the"
                         " range of 0 to %zu bytes/second", SIZE_MAX);
        return;
    }
    if (params->has_downtime_limit &&
        (params->downtime_limit < 0 ||
         params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
        error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
                         "the range of 0 to %d milliseconds",
                         MAX_MIGRATE_DOWNTIME);
        return;
    }
    if (params->has_x_checkpoint_delay && (params->x_checkpoint_delay < 0)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "x_checkpoint_delay",
                   "is invalid, it should be positive");
        return;
    }

    if (params->has_compress_level) {
        s->parameters.compress_level = params->compress_level;
    }
    if (params->has_compress_threads) {
        s->parameters.compress_threads = params->compress_threads;
    }
    if (params->has_decompress_threads) {
        s->parameters.decompress_threads = params->decompress_threads;
    }
    if (params->has_cpu_throttle_initial) {
        s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
    }
    if (params->has_cpu_throttle_increment) {
        s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
    }
    if (params->has_tls_creds) {
        g_free(s->parameters.tls_creds);
        s->parameters.tls_creds = g_strdup(params->tls_creds);
    }
    if (params->has_tls_hostname) {
        g_free(s->parameters.tls_hostname);
        s->parameters.tls_hostname = g_strdup(params->tls_hostname);
    }
    if (params->has_max_bandwidth) {
        s->parameters.max_bandwidth = params->max_bandwidth;
        if (s->to_dst_file) {
            qemu_file_set_rate_limit(s->to_dst_file,
                                s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
        }
    }
    if (params->has_downtime_limit) {
        s->parameters.downtime_limit = params->downtime_limit;
    }

    if (params->has_x_checkpoint_delay) {
        s->parameters.x_checkpoint_delay = params->x_checkpoint_delay;
        if (migration_in_colo_state()) {
            colo_checkpoint_notify(s);
        }
    }
    if (params->has_block_incremental) {
        s->parameters.block_incremental = params->block_incremental;
    }
}

void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy_ram()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    atomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */
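
/*
 * Atomically transition *state from old_state to new_state; if another
 * thread has already changed the state, the compare-and-swap fails and
 * no trace or event is emitted.
 */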
void migrate_set_state(int *state, int old_state, int new_state)
{
    if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(new_state);
        migrate_generate_event(new_state);
    }
}

void migrate_set_block_enabled(bool value, Error **errp)
{
    MigrationCapabilityStatusList *cap;

    cap = g_new0(MigrationCapabilityStatusList, 1);
    cap->value = g_new0(MigrationCapabilityStatus, 1);
    cap->value->capability = MIGRATION_CAPABILITY_BLOCK;
    cap->value->state = value;
    qmp_migrate_set_capabilities(cap, errp);
    qapi_free_MigrationCapabilityStatusList(cap);
}

static void migrate_set_block_incremental(MigrationState *s, bool value)
{
    s->parameters.block_incremental = value;
}

static void block_cleanup_parameters(MigrationState *s)
{
    if (s->must_remove_block_options) {
        /* setting to false can never fail */
        migrate_set_block_enabled(false, &error_abort);
        migrate_set_block_incremental(s, false);
        s->must_remove_block_options = false;
    }
}
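
/*
 * Bottom half that tears down the outgoing side: joins the migration
 * and compression threads, closes the outgoing file, finalises the
 * CANCELLING->CANCELLED transition, and notifies state-change listeners.
 */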
static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    migration_page_queue_free();

    if (s->to_dst_file) {
        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        migrate_compress_threads_join();
        qemu_fclose(s->to_dst_file);
        s->to_dst_file = NULL;
    }

    assert((s->state != MIGRATION_STATUS_ACTIVE) &&
           (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    notifier_list_notify(&migration_state_notifiers, s);
    block_cleanup_parameters(s);
}

void migrate_fd_error(MigrationState *s, const Error *error)
{
    trace_migrate_fd_error(error_get_pretty(error));
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    if (!s->error) {
        s->error = error_copy(error);
    }
    notifier_list_notify(&migration_state_notifiers, s);
    block_cleanup_parameters(s);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->to_dst_file;
    trace_migrate_fd_cancel();

    if (s->rp_state.from_dst_file) {
        /* shut down the rp socket, causing the rp thread to shut down */
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }

    do {
        old_state = s->state;
        if (!migration_is_setup_or_active(old_state)) {
            break;
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
    }
    block_cleanup_parameters(s);
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(void)
{
    MigrationState *s = migrate_get_current();

    return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

bool migration_in_postcopy_after_devices(MigrationState *s)
{
    return migration_in_postcopy() && s->postcopy_after_devices;
}

bool migration_is_idle(void)
{
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_COMPLETED:
    case MIGRATION_STATUS_FAILED:
        return true;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_COLO:
        return false;
    case MIGRATION_STATUS__MAX:
        g_assert_not_reached();
    }

    return false;
}

MigrationState *migrate_init(void)
{
    MigrationState *s = migrate_get_current();

    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->bytes_xfer = 0;
    s->xfer_limit = 0;
    s->cleanup_bh = 0;
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->rp_state.from_dst_file = NULL;
    s->rp_state.error = false;
    s->mbps = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->setup_time = 0;
    s->start_postcopy = false;
    s->postcopy_after_devices = false;
    s->migration_thread_running = false;
    error_free(s->error);
    s->error = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    return s;
}

static GSList *migration_blockers;
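
/*
 * Register @reason as a blocker that prevents a migration from being
 * started.  Fails with -EACCES when QEMU was started with the
 * only-migratable option, or with -EBUSY if a migration is already in
 * progress.
 */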
int migrate_add_blocker(Error *reason, Error **errp)
{
    if (only_migratable) {
        error_propagate(errp, error_copy(reason));
        error_prepend(errp, "disallowing migration blocker "
                          "(--only_migratable) for: ");
        return -EACCES;
    }

    if (migration_is_idle()) {
        migration_blockers = g_slist_prepend(migration_blockers, reason);
        return 0;
    }

    error_propagate(errp, error_copy(reason));
    error_prepend(errp, "disallowing migration blocker (migration in "
                      "progress) for: ");
    return -EBUSY;
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

bool migration_is_blocked(Error **errp)
{
    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (migration_blockers) {
        *errp = error_copy(migration_blockers->data);
        return true;
    }

    return false;
}
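
/*
 * QMP entry point for an outgoing migration: validates the current
 * state, translates the legacy -b/-i options into block-migration
 * capabilities, and dispatches on the URI scheme.
 */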
void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    const char *p;

    if (migration_is_setup_or_active(s->state) ||
        s->state == MIGRATION_STATUS_CANCELLING ||
        s->state == MIGRATION_STATUS_COLO) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return;
    }

    if (migration_is_blocked(errp)) {
        return;
    }

    if ((has_blk && blk) || (has_inc && inc)) {
        if (migrate_use_block() || migrate_use_block_incremental()) {
            error_setg(errp, "Command options are incompatible with "
                       "current migration capabilities");
            return;
        }
        migrate_set_block_enabled(true, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
        s->must_remove_block_options = true;
    }

    if (has_inc && inc) {
        migrate_set_block_incremental(s, true);
    }

    s = migrate_init();

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        return;
    }

    if (local_err) {
        migrate_fd_error(s, local_err);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}

int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationParameters p = {
        .has_max_bandwidth = true,
        .max_bandwidth = value,
    };

    qmp_migrate_set_parameters(&p, errp);
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
    if (value < 0 || value > MAX_MIGRATE_DOWNTIME_SECONDS) {
        error_setg(errp, "Parameter 'downtime_limit' expects an integer in "
                         "the range of 0 to %d seconds",
                         MAX_MIGRATE_DOWNTIME_SECONDS);
        return;
    }

    value *= 1000; /* Convert to milliseconds */
    value = MAX(0, MIN(INT64_MAX, value));

    MigrationParameters p = {
        .has_downtime_limit = true,
        .downtime_limit = value,
    };

    qmp_migrate_set_parameters(&p, errp);
}

bool migrate_release_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
}

bool migrate_postcopy_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
}

bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

bool migrate_use_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
}

int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_level;
}

int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_threads;
}

int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.decompress_threads;
}

bool migrate_use_events(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
}

int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}

bool migrate_use_block(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_BLOCK];
}

bool migrate_use_block_incremental(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.block_incremental;
}

/* migration thread support */
/*
 * Something bad happened to the RP stream, mark an error
 * The caller shall print or trace something to indicate why
 */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}

static struct rp_cmd_args {
    ssize_t     len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};

/*
 * Process a request for pages received on the return path,
 * We're allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
                                        ram_addr_t start, size_t len)
{
    long our_host_ps = getpagesize();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (start & (our_host_ps - 1) ||
       (len & (our_host_ps - 1))) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}

/*
 * Handles messages sent on the return path towards the source VM
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    uint8_t buf[512];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                    header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
            header_len != rp_cmd_args[header_type].len) ||
            header_len > sizeof(buf)) {
            error_report("RP: Received '%s' message (0x%04x) with "
                    "incorrect length %d expecting %zu",
                    rp_cmd_args[header_type].name, header_type, header_len,
                    (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = ldl_be_p(buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = ldl_be_p(buf);
            trace_source_return_path_thread_pong(tmp32);
            break;

        case MIG_RP_MSG_REQ_PAGES:
            start = ldq_be_p(buf);
            len = ldl_be_p(buf + 8);
            migrate_handle_rp_req_pages(ms, NULL, start, len);
            break;

        case MIG_RP_MSG_REQ_PAGES_ID:
            expected_len = 12 + 1; /* header + termination */
            if (header_len >= expected_len) {
                start = ldq_be_p(buf);
                len = ldl_be_p(buf + 8);
                /* Now we expect an idstr */
                tmp32 = buf[12]; /* Length of the following idstr */
                buf[13 + tmp32] = '\0';
                expected_len += tmp32;
            }
            if (header_len != expected_len) {
                error_report("RP: Req_Page_id with length %d expecting %zd",
                        header_len, expected_len);
                mark_source_rp_bad(ms);
                goto out;
            }
            migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
            break;

        default:
            break;
        }
    }
    if (qemu_file_get_error(rp)) {
        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
out:
    ms->rp_state.from_dst_file = NULL;
    qemu_fclose(rp);
    return NULL;
}
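
/*
 * Create the return-path QEMUFile from the outgoing channel and spawn
 * the thread that consumes it; returns -1 if the channel cannot provide
 * a return path.
 */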
static int open_return_path_on_source(MigrationState *ms)
{
    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
    if (!ms->rp_state.from_dst_file) {
        return -1;
    }

    trace_open_return_path_on_source();
    qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                       source_return_path_thread, ms, QEMU_THREAD_JOINABLE);

    trace_open_return_path_on_source_continue();

    return 0;
}

/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
    /*
     * If this is a normal exit then the destination will send a SHUT and the
     * rp_thread will exit, however if there's an error we need to cause
     * it to exit.
     */
    if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
        /*
         * shutdown(2), if we have it, will cause it to unblock if it's stuck
         * waiting for the destination.
         */
        qemu_file_shutdown(ms->rp_state.from_dst_file);
        mark_source_rp_bad(ms);
    }
    trace_await_return_path_close_on_source_joining();
    qemu_thread_join(&ms->rp_state.rp_thread);
    trace_await_return_path_close_on_source_close();
    return ms->rp_state.error;
}

/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 */
static int postcopy_start(MigrationState *ms, bool *old_vm_running)
{
    int ret;
    QIOChannelBuffer *bioc;
    QEMUFile *fb;
    int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    bool restart_block = false;
    migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    trace_postcopy_start();
    qemu_mutex_lock_iothread();
    trace_postcopy_start_set_run();

    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
    *old_vm_running = runstate_is_running();
    global_state_store();
    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_inactivate_all();
    if (ret < 0) {
        goto fail;
    }
    restart_block = true;

    /*
     * Cause any non-postcopiable, but iterative devices to
     * send out their final data.
     */
    qemu_savevm_state_complete_precopy(ms->to_dst_file, true);

    /*
     * in Finish migrate and with the io-lock held everything should
     * be quiet, but we've potentially still got dirty pages and we
     * need to tell the destination to throw any pages it's already received
     * that are dirty
     */
    if (ram_postcopy_send_discard_bitmap(ms)) {
        error_report("postcopy send discard bitmap failed");
        goto fail;
    }

    /*
     * send rest of state - note things that are doing postcopy
     * will notice we're in POSTCOPY_ACTIVE and not actually
     * wrap their state up here
     */
    qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
    /* Ping just for debugging, helps line traces up */
    qemu_savevm_send_ping(ms->to_dst_file, 2);

    /*
     * While loading the device state we may trigger page transfer
     * requests and the fd must be free to process those, and thus
     * the destination must read the whole device state off the fd before
     * it starts processing it.  Unfortunately the ad-hoc migration format
     * doesn't allow the destination to know the size to read without fully
     * parsing it through each devices load-state code (especially the open
     * coded devices that use get/put).
     * So we wrap the device state up in a package with a length at the start;
     * to do this we use a qemu_buf to hold the whole of the device state.
     */
    bioc = qio_channel_buffer_new(4096);
    qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
    fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc));
    object_unref(OBJECT(bioc));

    /*
     * Make sure the receiver can get incoming pages before we send the rest
     * of the state
     */
    qemu_savevm_send_postcopy_listen(fb);

    qemu_savevm_state_complete_precopy(fb, false);
    qemu_savevm_send_ping(fb, 3);

    qemu_savevm_send_postcopy_run(fb);

    /* <><> end of stuff going into the package */

    /* Last point of recovery; as soon as we send the package the destination
     * can open devices and potentially start running.
     * Let's just check again that we've not got any errors.
     */
    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored (pre package)");
        goto fail_closefb;
    }

    restart_block = false;

    /* Now send that blob */
    if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
        goto fail_closefb;
    }
    qemu_fclose(fb);

    /* Send a notify to give a chance for anything that needs to happen
     * at the transition to postcopy and after the device state; in particular
     * spice needs to trigger a transition now
     */
    ms->postcopy_after_devices = true;
    notifier_list_notify(&migration_state_notifiers, ms);

    ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;

    qemu_mutex_unlock_iothread();

    /*
     * Although this ping is just for debug, it could potentially be
     * used for getting a better measurement of downtime at the source.
     */
    qemu_savevm_send_ping(ms->to_dst_file, 4);

    if (migrate_release_ram()) {
        ram_postcopy_migrated_memory_release(ms);
    }

    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored");
        migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                              MIGRATION_STATUS_FAILED);
    }

    return ret;

fail_closefb:
    qemu_fclose(fb);
fail:
    migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                          MIGRATION_STATUS_FAILED);
    if (restart_block) {
        /* A failure happened early enough that we know the destination hasn't
         * accessed block devices, so we're safe to recover.
         */
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }
    qemu_mutex_unlock_iothread();
    return -1;
}

/*
 * migration_completion: Used by migration_thread when there's not much left.
 *   The caller 'breaks' the loop when this returns.
 *
 * @s: Current migration state
 * @current_active_state: The migration state we expect to be in
 * @*old_vm_running: Pointer to old_vm_running flag
 * @*start_time: Pointer to time to update
 */
static void migration_completion(MigrationState *s, int current_active_state,
                                 bool *old_vm_running,
                                 int64_t *start_time)
{
    int ret;

    if (s->state == MIGRATION_STATUS_ACTIVE) {
        qemu_mutex_lock_iothread();
        *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
        *old_vm_running = runstate_is_running();
        ret = global_state_store();

        if (!ret) {
            ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
            /*
             * Don't mark the image with BDRV_O_INACTIVE flag if
             * we will go into COLO stage later.
             */
            if (ret >= 0 && !migrate_colo_enabled()) {
                ret = bdrv_inactivate_all();
            }
            if (ret >= 0) {
                qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
                qemu_savevm_state_complete_precopy(s->to_dst_file, false);
                s->block_inactive = true;
            }
        }
        qemu_mutex_unlock_iothread();

        if (ret < 0) {
            goto fail;
        }
    } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        trace_migration_completion_postcopy_end();

        qemu_savevm_state_complete_postcopy(s->to_dst_file);
        trace_migration_completion_postcopy_end_after_complete();
    }

    /*
     * If rp was opened we must clean up the thread before
     * cleaning everything else up (since if there are no failures
     * it will wait for the destination to send its status in
     * a SHUT command).
     * Postcopy opens rp if enabled (even if it's not activated)
     */
    if (migrate_postcopy_ram()) {
        int rp_error;
        trace_migration_completion_postcopy_end_before_rp();
        rp_error = await_return_path_close_on_source(s);
        trace_migration_completion_postcopy_end_after_rp(rp_error);
        if (rp_error) {
            goto fail_invalidate;
        }
    }

    if (qemu_file_get_error(s->to_dst_file)) {
        trace_migration_completion_file_err();
        goto fail_invalidate;
    }

    if (!migrate_colo_enabled()) {
        migrate_set_state(&s->state, current_active_state,
                          MIGRATION_STATUS_COMPLETED);
    }

    return;

fail_invalidate:
    /* If not doing postcopy, vm_start() will be called: let's regain
     * control on images.
     */
    if (s->state == MIGRATION_STATUS_ACTIVE) {
        Error *local_err = NULL;

        qemu_mutex_lock_iothread();
        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
        qemu_mutex_unlock_iothread();
    }

fail:
    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_FAILED);
}

bool migrate_colo_enabled(void)
{
    MigrationState *s = migrate_get_current();
    return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO];
}

/*
 * Master migration thread on the source VM.
 * It drives the migration and pumps the data down the outgoing channel.
 */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    /* Used by the bandwidth calcs, updated later */
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    /*
     * The final stage happens when the remaining data is smaller than
     * this threshold; it's calculated from the requested downtime and
     * measured bandwidth
     */
    int64_t threshold_size = 0;
    int64_t start_time = initial_time;
    int64_t end_time;
    bool old_vm_running = false;
    bool entered_postcopy = false;
    /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
    enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
    bool enable_colo = migrate_colo_enabled();

    rcu_register_thread();

    qemu_savevm_state_header(s->to_dst_file);

    if (migrate_postcopy_ram()) {
        /* Now tell the dest that it should open its end so it can reply */
        qemu_savevm_send_open_return_path(s->to_dst_file);

        /* And do a ping that will make stuff easier to debug */
        qemu_savevm_send_ping(s->to_dst_file, 1);

        /*
         * Tell the destination that we *might* want to do postcopy later;
         * if the other end can't do postcopy it should fail now, nice and
         * early.
         */
        qemu_savevm_send_postcopy_advise(s->to_dst_file);
    }

    qemu_savevm_state_begin(s->to_dst_file);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_ACTIVE);

    trace_migration_thread_setup_complete();

    while (s->state == MIGRATION_STATUS_ACTIVE ||
           s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->to_dst_file)) {
            uint64_t pend_post, pend_nonpost;

            qemu_savevm_state_pending(s->to_dst_file, threshold_size,
                                      &pend_nonpost, &pend_post);
            pending_size = pend_nonpost + pend_post;
            trace_migrate_pending(pending_size, threshold_size,
                                  pend_post, pend_nonpost);
            if (pending_size && pending_size >= threshold_size) {
                /* Still a significant amount to transfer */

                if (migrate_postcopy_ram() &&
                    s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
                    pend_nonpost <= threshold_size &&
                    atomic_read(&s->start_postcopy)) {

                    if (!postcopy_start(s, &old_vm_running)) {
                        current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
                        entered_postcopy = true;
                    }

                    continue;
                }
                /* Just another iteration step */
                qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
            } else {
                trace_migration_thread_low_pending(pending_size);
                migration_completion(s, current_active_state,
                                     &old_vm_running, &start_time);
                break;
            }
        }

        if (qemu_file_get_error(s->to_dst_file)) {
            migrate_set_state(&s->state, current_active_state,
                              MIGRATION_STATUS_FAILED);
            trace_migration_thread_file_err();
            break;
        }
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
            uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
                                         initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = (double)transferred_bytes / time_spent;
            threshold_size = bandwidth * s->parameters.downtime_limit;

            s->mbps = (((double) transferred_bytes * 8.0) /
                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;

            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, threshold_size);
            /*
             * If we haven't sent anything, we don't want to recalculate;
             * 10000 is a small enough number for our purposes.
             */
            if (ram_dirty_pages_rate() && transferred_bytes > 10000) {
                s->expected_downtime = ram_dirty_pages_rate() *
                    qemu_target_page_size() / bandwidth;
            }

            qemu_file_reset_rate_limit(s->to_dst_file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->to_dst_file);
        }
        if (qemu_file_rate_limit(s->to_dst_file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time) * 1000);
        }
    }

    trace_migration_thread_after_loop();
    /* If we enabled cpu throttling for auto-converge, turn it off. */
    cpu_throttle_stop();
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    qemu_mutex_lock_iothread();
    /*
     * The resources allocated by migration will be reused in the COLO
     * process, so don't release them there.
     */
    if (!enable_colo) {
        qemu_savevm_state_cleanup();
    }
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
        s->total_time = end_time - s->total_time;
        if (!entered_postcopy) {
            s->downtime = end_time - start_time;
        }
        if (s->total_time) {
            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) s->total_time)) / 1000;
        }
        runstate_set(RUN_STATE_POSTMIGRATE);
    } else {
        if (s->state == MIGRATION_STATUS_ACTIVE && enable_colo) {
            migrate_start_colo_process(s);
            qemu_savevm_state_cleanup();
            /*
             * FIXME: the VM will run in COLO regardless of its old running
             * state; after exiting COLO it will keep running.
             */
            old_vm_running = true;
        }
        if (old_vm_running && !entered_postcopy) {
            vm_start();
        } else {
            if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
                runstate_set(RUN_STATE_POSTMIGRATE);
            }
        }
    }
    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();

    rcu_unregister_thread();
    return NULL;
}
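
/*
 * Entry point once the outgoing transport is connected: applies the
 * configured bandwidth limit, notifies listeners, opens the return path
 * when postcopy is enabled, and spawns the migration thread.
 */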
void migrate_fd_connect(MigrationState *s)
{
    s->expected_downtime = s->parameters.downtime_limit;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_blocking(s->to_dst_file, true);
    qemu_file_set_rate_limit(s->to_dst_file,
                             s->parameters.max_bandwidth / XFER_LIMIT_RATIO);

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    /*
     * Open the return path; currently for postcopy but other things might
     * also want it.
     */
    if (migrate_postcopy_ram()) {
        if (open_return_path_on_source(s)) {
            error_report("Unable to open return-path for postcopy");
            migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                              MIGRATION_STATUS_FAILED);
            migrate_fd_cleanup(s);
            return;
        }
    }

    migrate_compress_threads_create();
    qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
    s->migration_thread_running = true;
}