qemu/ar7.git: migration/migration.c
1 /*
2 * QEMU live migration
4 * Copyright IBM, Corp. 2008
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
16 #include "qemu/osdep.h"
17 #include "qemu/cutils.h"
18 #include "qemu/error-report.h"
19 #include "qemu/main-loop.h"
20 #include "migration/migration.h"
21 #include "migration/qemu-file.h"
22 #include "sysemu/sysemu.h"
23 #include "block/block.h"
24 #include "qapi/qmp/qerror.h"
25 #include "qapi/util.h"
26 #include "qemu/sockets.h"
27 #include "qemu/rcu.h"
28 #include "migration/block.h"
29 #include "migration/postcopy-ram.h"
30 #include "qemu/thread.h"
31 #include "qmp-commands.h"
32 #include "trace.h"
33 #include "qapi-event.h"
34 #include "qom/cpu.h"
35 #include "exec/memory.h"
36 #include "exec/address-spaces.h"
38 #define MAX_THROTTLE (32 << 20) /* Migration transfer speed throttling */
40 /* Amount of time to allocate to each "chunk" of bandwidth-throttled
41 * data. */
42 #define BUFFER_DELAY 100
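/* Number of BUFFER_DELAY-sized chunks per second; the user-visible bandwidth
 * limit (bytes/s) is divided by this to get the per-chunk transfer budget. */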
43 #define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
45 /* Default compression thread count */
46 #define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
47 /* Default decompression thread count, usually decompression is at
48 * least 4 times as fast as compression.*/
49 #define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
50 /* 0: no compression, 1: best speed, ..., 9: best compression ratio */
51 #define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
52 /* Define default autoconverge cpu throttle migration parameters */
53 #define DEFAULT_MIGRATE_X_CPU_THROTTLE_INITIAL 20
54 #define DEFAULT_MIGRATE_X_CPU_THROTTLE_INCREMENT 10
56 /* Migration XBZRLE default cache size */
57 #define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
59 static NotifierList migration_state_notifiers =
60 NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
62 static bool deferred_incoming;
65 * Current state of incoming postcopy; note this is not part of
66 * MigrationIncomingState since its state is used during cleanup
67 * at the end as MIS is being freed.
69 static PostcopyState incoming_postcopy_state;
71 /* When we add fault tolerance, we could have several
72 migrations at once. For now we don't need to add
73 dynamic creation of migration */
75 /* For outgoing */
76 MigrationState *migrate_get_current(void)
78 static bool once;
79 static MigrationState current_migration = {
80 .state = MIGRATION_STATUS_NONE,
81 .bandwidth_limit = MAX_THROTTLE,
82 .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
83 .mbps = -1,
84 .parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] =
85 DEFAULT_MIGRATE_COMPRESS_LEVEL,
86 .parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
87 DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
88 .parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
89 DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
90 .parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
91 DEFAULT_MIGRATE_X_CPU_THROTTLE_INITIAL,
92 .parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
93 DEFAULT_MIGRATE_X_CPU_THROTTLE_INCREMENT,
96 if (!once) {
97 qemu_mutex_init(&current_migration.src_page_req_mutex);
98 once = true;
100 return &current_migration;
103 /* For incoming */
104 static MigrationIncomingState *mis_current;
106 MigrationIncomingState *migration_incoming_get_current(void)
108 return mis_current;
111 MigrationIncomingState *migration_incoming_state_new(QEMUFile* f)
113 mis_current = g_new0(MigrationIncomingState, 1);
114 mis_current->from_src_file = f;
115 mis_current->state = MIGRATION_STATUS_NONE;
116 QLIST_INIT(&mis_current->loadvm_handlers);
117 qemu_mutex_init(&mis_current->rp_mutex);
118 qemu_event_init(&mis_current->main_thread_load_event, false);
120 return mis_current;
123 void migration_incoming_state_destroy(void)
125 qemu_event_destroy(&mis_current->main_thread_load_event);
126 loadvm_free_handlers(mis_current);
127 g_free(mis_current);
128 mis_current = NULL;
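/* Wrapper for the guest run state that is migrated as its own 'globalstate'
 * section, so the destination can restore the source's run state (e.g. paused)
 * instead of relying on autostart alone. */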
132 typedef struct {
133 bool optional;
134 uint32_t size;
135 uint8_t runstate[100];
136 RunState state;
137 bool received;
138 } GlobalState;
140 static GlobalState global_state;
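/* Record the current run state name into global_state; returns -EINVAL if the
 * name does not fit into the runstate buffer. */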
142 int global_state_store(void)
144 if (!runstate_store((char *)global_state.runstate,
145 sizeof(global_state.runstate))) {
146 error_report("runstate name too big: %s", global_state.runstate);
147 trace_migrate_state_too_big();
148 return -EINVAL;
150 return 0;
153 void global_state_store_running(void)
155 const char *state = RunState_lookup[RUN_STATE_RUNNING];
156 strncpy((char *)global_state.runstate,
157 state, sizeof(global_state.runstate));
160 static bool global_state_received(void)
162 return global_state.received;
165 static RunState global_state_get_runstate(void)
167 return global_state.state;
170 void global_state_set_optional(void)
172 global_state.optional = true;
175 static bool global_state_needed(void *opaque)
177 GlobalState *s = opaque;
178 char *runstate = (char *)s->runstate;
180 /* If it is not optional, it is mandatory */
182 if (s->optional == false) {
183 return true;
186 /* If state is running or paused, it is not needed */
188 if (strcmp(runstate, "running") == 0 ||
189 strcmp(runstate, "paused") == 0) {
190 return false;
193 /* for any other state it is needed */
194 return true;
197 static int global_state_post_load(void *opaque, int version_id)
199 GlobalState *s = opaque;
200 Error *local_err = NULL;
201 int r;
202 char *runstate = (char *)s->runstate;
204 s->received = true;
205 trace_migrate_global_state_post_load(runstate);
207 r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE__MAX,
208 -1, &local_err);
210 if (r == -1) {
211 if (local_err) {
212 error_report_err(local_err);
214 return -EINVAL;
216 s->state = r;
218 return 0;
221 static void global_state_pre_save(void *opaque)
223 GlobalState *s = opaque;
225 trace_migrate_global_state_pre_save((char *)s->runstate);
226 s->size = strlen((char *)s->runstate) + 1;
229 static const VMStateDescription vmstate_globalstate = {
230 .name = "globalstate",
231 .version_id = 1,
232 .minimum_version_id = 1,
233 .post_load = global_state_post_load,
234 .pre_save = global_state_pre_save,
235 .needed = global_state_needed,
236 .fields = (VMStateField[]) {
237 VMSTATE_UINT32(size, GlobalState),
238 VMSTATE_BUFFER(runstate, GlobalState),
239 VMSTATE_END_OF_LIST()
243 void register_global_state(void)
245 /* We will use it regardless of whether we receive it */
246 strcpy((char *)&global_state.runstate, "");
247 global_state.received = false;
248 vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
251 static void migrate_generate_event(int new_state)
253 if (migrate_use_events()) {
254 qapi_event_send_migration(new_state, &error_abort);
259 * Called on -incoming with a defer: uri.
260 * The migration can be started later after any parameters have been
261 * changed.
263 static void deferred_incoming_migration(Error **errp)
265 if (deferred_incoming) {
266 error_setg(errp, "Incoming migration already deferred");
268 deferred_incoming = true;
271 /* Request a range of pages from the source VM at the given
272 * start address.
273 * rbname: Name of the RAMBlock to request the page in, if NULL it's the same
274 * as the last request (a name must have been given previously)
275 * Start: Address offset within the RB
276 * Len: Length in bytes required - must be a multiple of pagesize
278 void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
279 ram_addr_t start, size_t len)
281 uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), 1 byte name length, rbname up to 255 */
282 size_t msglen = 12; /* start + len */
284 *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
285 *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);
287 if (rbname) {
288 int rbname_len = strlen(rbname);
289 assert(rbname_len < 256);
291 bufc[msglen++] = rbname_len;
292 memcpy(bufc + msglen, rbname, rbname_len);
293 msglen += rbname_len;
294 migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
295 } else {
296 migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
300 void qemu_start_incoming_migration(const char *uri, Error **errp)
302 const char *p;
304 qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
305 if (!strcmp(uri, "defer")) {
306 deferred_incoming_migration(errp);
307 } else if (strstart(uri, "tcp:", &p)) {
308 tcp_start_incoming_migration(p, errp);
309 #ifdef CONFIG_RDMA
310 } else if (strstart(uri, "rdma:", &p)) {
311 rdma_start_incoming_migration(p, errp);
312 #endif
313 #if !defined(WIN32)
314 } else if (strstart(uri, "exec:", &p)) {
315 exec_start_incoming_migration(p, errp);
316 } else if (strstart(uri, "unix:", &p)) {
317 unix_start_incoming_migration(p, errp);
318 } else if (strstart(uri, "fd:", &p)) {
319 fd_start_incoming_migration(p, errp);
320 #endif
321 } else {
322 error_setg(errp, "unknown migration protocol: %s", uri);
326 static void process_incoming_migration_bh(void *opaque)
328 Error *local_err = NULL;
329 MigrationIncomingState *mis = opaque;
331 /* Make sure all file formats flush their mutable metadata */
332 bdrv_invalidate_cache_all(&local_err);
333 if (local_err) {
334 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
335 MIGRATION_STATUS_FAILED);
336 error_report_err(local_err);
337 migrate_decompress_threads_join();
338 exit(EXIT_FAILURE);
342 * This must happen after all error conditions are dealt with and
343 * we're sure the VM is going to be running on this host.
345 qemu_announce_self();
347 /* If global state section was not received or we are in running
348 state, we need to obey autostart. Any other state is set with
349 runstate_set. */
351 if (!global_state_received() ||
352 global_state_get_runstate() == RUN_STATE_RUNNING) {
353 if (autostart) {
354 vm_start();
355 } else {
356 runstate_set(RUN_STATE_PAUSED);
358 } else {
359 runstate_set(global_state_get_runstate());
361 migrate_decompress_threads_join();
363 * This must happen after any state changes since as soon as an external
364 * observer sees this event they might start to prod at the VM assuming
365 * it's ready to use.
367 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
368 MIGRATION_STATUS_COMPLETED);
369 qemu_bh_delete(mis->bh);
370 migration_incoming_state_destroy();
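/* Coroutine that runs the incoming side: loads the migration stream from 'f',
 * then either lets the postcopy listen thread finish the job or schedules
 * process_incoming_migration_bh to complete the switch-over. */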
373 static void process_incoming_migration_co(void *opaque)
375 QEMUFile *f = opaque;
376 MigrationIncomingState *mis;
377 PostcopyState ps;
378 int ret;
380 mis = migration_incoming_state_new(f);
381 postcopy_state_set(POSTCOPY_INCOMING_NONE);
382 migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
383 MIGRATION_STATUS_ACTIVE);
384 ret = qemu_loadvm_state(f);
386 ps = postcopy_state_get();
387 trace_process_incoming_migration_co_end(ret, ps);
388 if (ps != POSTCOPY_INCOMING_NONE) {
389 if (ps == POSTCOPY_INCOMING_ADVISE) {
391 * Where a migration had postcopy enabled (and thus went to advise)
392 * but managed to complete within the precopy period, we can use
393 * the normal exit.
395 postcopy_ram_incoming_cleanup(mis);
396 } else if (ret >= 0) {
398 * Postcopy was started, cleanup should happen at the end of the
399 * postcopy thread.
401 trace_process_incoming_migration_co_postcopy_end_main();
402 return;
404 /* Else if something went wrong then just fall out of the normal exit */
407 qemu_fclose(f);
408 free_xbzrle_decoded_buf();
410 if (ret < 0) {
411 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
412 MIGRATION_STATUS_FAILED);
413 error_report("load of migration failed: %s", strerror(-ret));
414 migrate_decompress_threads_join();
415 exit(EXIT_FAILURE);
418 mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
419 qemu_bh_schedule(mis->bh);
422 void process_incoming_migration(QEMUFile *f)
424 Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
425 int fd = qemu_get_fd(f);
427 assert(fd != -1);
428 migrate_decompress_threads_create();
429 qemu_set_nonblock(fd);
430 qemu_coroutine_enter(co, f);
434 * Send a message on the return channel back to the source
435 * of the migration.
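 * Each message is sent as a big-endian 16-bit type, a big-endian 16-bit
 * payload length, and then 'len' bytes of payload.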
437 void migrate_send_rp_message(MigrationIncomingState *mis,
438 enum mig_rp_message_type message_type,
439 uint16_t len, void *data)
441 trace_migrate_send_rp_message((int)message_type, len);
442 qemu_mutex_lock(&mis->rp_mutex);
443 qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
444 qemu_put_be16(mis->to_src_file, len);
445 qemu_put_buffer(mis->to_src_file, data, len);
446 qemu_fflush(mis->to_src_file);
447 qemu_mutex_unlock(&mis->rp_mutex);
451 * Send a 'SHUT' message on the return channel with the given value
452 * to indicate that we've finished with the RP. Non-0 value indicates
453 * error.
455 void migrate_send_rp_shut(MigrationIncomingState *mis,
456 uint32_t value)
458 uint32_t buf;
460 buf = cpu_to_be32(value);
461 migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
465 * Send a 'PONG' message on the return channel with the given value
466 * (normally in response to a 'PING')
468 void migrate_send_rp_pong(MigrationIncomingState *mis,
469 uint32_t value)
471 uint32_t buf;
473 buf = cpu_to_be32(value);
474 migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
477 /* Maximum amount of time, in nanoseconds, that we are willing to have the
478  * guest down during migration. The choice of nanoseconds is because it is
479  * the maximum resolution that get_clock() can achieve. It is an internal
480  * measure. All user-visible units must be in seconds */
481 static uint64_t max_downtime = 300000000;
483 uint64_t migrate_max_downtime(void)
485 return max_downtime;
488 MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
490 MigrationCapabilityStatusList *head = NULL;
491 MigrationCapabilityStatusList *caps;
492 MigrationState *s = migrate_get_current();
493 int i;
495 caps = NULL; /* silence compiler warning */
496 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
497 if (head == NULL) {
498 head = g_malloc0(sizeof(*caps));
499 caps = head;
500 } else {
501 caps->next = g_malloc0(sizeof(*caps));
502 caps = caps->next;
504 caps->value =
505 g_malloc(sizeof(*caps->value));
506 caps->value->capability = i;
507 caps->value->state = s->enabled_capabilities[i];
510 return head;
513 MigrationParameters *qmp_query_migrate_parameters(Error **errp)
515 MigrationParameters *params;
516 MigrationState *s = migrate_get_current();
518 params = g_malloc0(sizeof(*params));
519 params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
520 params->compress_threads =
521 s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
522 params->decompress_threads =
523 s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
524 params->x_cpu_throttle_initial =
525 s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
526 params->x_cpu_throttle_increment =
527 s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];
529 return params;
533 * Return true if we're already in the middle of a migration
534 * (i.e. any of the active or setup states)
536 static bool migration_is_setup_or_active(int state)
538 switch (state) {
539 case MIGRATION_STATUS_ACTIVE:
540 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
541 case MIGRATION_STATUS_SETUP:
542 return true;
544 default:
545 return false;
550 static void get_xbzrle_cache_stats(MigrationInfo *info)
552 if (migrate_use_xbzrle()) {
553 info->has_xbzrle_cache = true;
554 info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
555 info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
556 info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
557 info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
558 info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
559 info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
560 info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
564 MigrationInfo *qmp_query_migrate(Error **errp)
566 MigrationInfo *info = g_malloc0(sizeof(*info));
567 MigrationState *s = migrate_get_current();
569 switch (s->state) {
570 case MIGRATION_STATUS_NONE:
571 /* no migration has happened ever */
572 break;
573 case MIGRATION_STATUS_SETUP:
574 info->has_status = true;
575 info->has_total_time = false;
576 break;
577 case MIGRATION_STATUS_ACTIVE:
578 case MIGRATION_STATUS_CANCELLING:
579 info->has_status = true;
580 info->has_total_time = true;
581 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
582 - s->total_time;
583 info->has_expected_downtime = true;
584 info->expected_downtime = s->expected_downtime;
585 info->has_setup_time = true;
586 info->setup_time = s->setup_time;
588 info->has_ram = true;
589 info->ram = g_malloc0(sizeof(*info->ram));
590 info->ram->transferred = ram_bytes_transferred();
591 info->ram->remaining = ram_bytes_remaining();
592 info->ram->total = ram_bytes_total();
593 info->ram->duplicate = dup_mig_pages_transferred();
594 info->ram->skipped = skipped_mig_pages_transferred();
595 info->ram->normal = norm_mig_pages_transferred();
596 info->ram->normal_bytes = norm_mig_bytes_transferred();
597 info->ram->dirty_pages_rate = s->dirty_pages_rate;
598 info->ram->mbps = s->mbps;
599 info->ram->dirty_sync_count = s->dirty_sync_count;
601 if (blk_mig_active()) {
602 info->has_disk = true;
603 info->disk = g_malloc0(sizeof(*info->disk));
604 info->disk->transferred = blk_mig_bytes_transferred();
605 info->disk->remaining = blk_mig_bytes_remaining();
606 info->disk->total = blk_mig_bytes_total();
609 if (cpu_throttle_active()) {
610 info->has_x_cpu_throttle_percentage = true;
611 info->x_cpu_throttle_percentage = cpu_throttle_get_percentage();
614 get_xbzrle_cache_stats(info);
615 break;
616 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
617 /* Mostly the same as active; TODO add some postcopy stats */
618 info->has_status = true;
619 info->has_total_time = true;
620 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
621 - s->total_time;
622 info->has_expected_downtime = true;
623 info->expected_downtime = s->expected_downtime;
624 info->has_setup_time = true;
625 info->setup_time = s->setup_time;
627 info->has_ram = true;
628 info->ram = g_malloc0(sizeof(*info->ram));
629 info->ram->transferred = ram_bytes_transferred();
630 info->ram->remaining = ram_bytes_remaining();
631 info->ram->total = ram_bytes_total();
632 info->ram->duplicate = dup_mig_pages_transferred();
633 info->ram->skipped = skipped_mig_pages_transferred();
634 info->ram->normal = norm_mig_pages_transferred();
635 info->ram->normal_bytes = norm_mig_bytes_transferred();
636 info->ram->dirty_pages_rate = s->dirty_pages_rate;
637 info->ram->mbps = s->mbps;
638 info->ram->dirty_sync_count = s->dirty_sync_count;
640 if (blk_mig_active()) {
641 info->has_disk = true;
642 info->disk = g_malloc0(sizeof(*info->disk));
643 info->disk->transferred = blk_mig_bytes_transferred();
644 info->disk->remaining = blk_mig_bytes_remaining();
645 info->disk->total = blk_mig_bytes_total();
648 get_xbzrle_cache_stats(info);
649 break;
650 case MIGRATION_STATUS_COMPLETED:
651 get_xbzrle_cache_stats(info);
653 info->has_status = true;
654 info->has_total_time = true;
655 info->total_time = s->total_time;
656 info->has_downtime = true;
657 info->downtime = s->downtime;
658 info->has_setup_time = true;
659 info->setup_time = s->setup_time;
661 info->has_ram = true;
662 info->ram = g_malloc0(sizeof(*info->ram));
663 info->ram->transferred = ram_bytes_transferred();
664 info->ram->remaining = 0;
665 info->ram->total = ram_bytes_total();
666 info->ram->duplicate = dup_mig_pages_transferred();
667 info->ram->skipped = skipped_mig_pages_transferred();
668 info->ram->normal = norm_mig_pages_transferred();
669 info->ram->normal_bytes = norm_mig_bytes_transferred();
670 info->ram->mbps = s->mbps;
671 info->ram->dirty_sync_count = s->dirty_sync_count;
672 break;
673 case MIGRATION_STATUS_FAILED:
674 info->has_status = true;
675 break;
676 case MIGRATION_STATUS_CANCELLED:
677 info->has_status = true;
678 break;
680 info->status = s->state;
682 return info;
685 void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
686 Error **errp)
688 MigrationState *s = migrate_get_current();
689 MigrationCapabilityStatusList *cap;
691 if (migration_is_setup_or_active(s->state)) {
692 error_setg(errp, QERR_MIGRATION_ACTIVE);
693 return;
696 for (cap = params; cap; cap = cap->next) {
697 s->enabled_capabilities[cap->value->capability] = cap->value->state;
700 if (migrate_postcopy_ram()) {
701 if (migrate_use_compression()) {
702 /* The decompression threads asynchronously write into RAM
703 * rather than use the atomic copies needed to avoid
704 * userfaulting. It should be possible to fix the decompression
705 * threads for compatibility in future.
707 error_report("Postcopy is not currently compatible with "
708 "compression");
709 s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
710 false;
715 void qmp_migrate_set_parameters(bool has_compress_level,
716 int64_t compress_level,
717 bool has_compress_threads,
718 int64_t compress_threads,
719 bool has_decompress_threads,
720 int64_t decompress_threads,
721 bool has_x_cpu_throttle_initial,
722 int64_t x_cpu_throttle_initial,
723 bool has_x_cpu_throttle_increment,
724 int64_t x_cpu_throttle_increment, Error **errp)
726 MigrationState *s = migrate_get_current();
728 if (has_compress_level && (compress_level < 0 || compress_level > 9)) {
729 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
730 "is invalid, it should be in the range of 0 to 9");
731 return;
733 if (has_compress_threads &&
734 (compress_threads < 1 || compress_threads > 255)) {
735 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
736 "compress_threads",
737 "is invalid, it should be in the range of 1 to 255");
738 return;
740 if (has_decompress_threads &&
741 (decompress_threads < 1 || decompress_threads > 255)) {
742 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
743 "decompress_threads",
744 "is invalid, it should be in the range of 1 to 255");
745 return;
747 if (has_x_cpu_throttle_initial &&
748 (x_cpu_throttle_initial < 1 || x_cpu_throttle_initial > 99)) {
749 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
750 "x_cpu_throttle_initial",
751 "an integer in the range of 1 to 99");
753 if (has_x_cpu_throttle_increment &&
754 (x_cpu_throttle_increment < 1 || x_cpu_throttle_increment > 99)) {
755 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
756 "x_cpu_throttle_increment",
757 "an integer in the range of 1 to 99");
760 if (has_compress_level) {
761 s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
763 if (has_compress_threads) {
764 s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads;
766 if (has_decompress_threads) {
767 s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
768 decompress_threads;
770 if (has_x_cpu_throttle_initial) {
771 s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
772 x_cpu_throttle_initial;
775 if (has_x_cpu_throttle_increment) {
776 s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
777 x_cpu_throttle_increment;
781 void qmp_migrate_start_postcopy(Error **errp)
783 MigrationState *s = migrate_get_current();
785 if (!migrate_postcopy_ram()) {
786 error_setg(errp, "Enable postcopy with migrate_set_capability before"
787 " the start of migration");
788 return;
791 if (s->state == MIGRATION_STATUS_NONE) {
792 error_setg(errp, "Postcopy must be started after migration has been"
793 " started");
794 return;
797 * we don't error if migration has finished since that would be racy
798 * with issuing this command.
800 atomic_set(&s->start_postcopy, true);
803 /* shared migration helpers */
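/* Atomically move *state from old_state to new_state; the transition (and the
 * MIGRATION QMP event) only happens if *state still equals old_state. */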
805 void migrate_set_state(int *state, int old_state, int new_state)
807 if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
808 trace_migrate_set_state(new_state);
809 migrate_generate_event(new_state);
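/* Bottom half run on the main thread when migration ends: joins the migration
 * thread, closes the outgoing file, moves CANCELLING to CANCELLED and notifies
 * state-change listeners. */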
813 static void migrate_fd_cleanup(void *opaque)
815 MigrationState *s = opaque;
817 qemu_bh_delete(s->cleanup_bh);
818 s->cleanup_bh = NULL;
820 flush_page_queue(s);
822 if (s->to_dst_file) {
823 trace_migrate_fd_cleanup();
824 qemu_mutex_unlock_iothread();
825 if (s->migration_thread_running) {
826 qemu_thread_join(&s->thread);
827 s->migration_thread_running = false;
829 qemu_mutex_lock_iothread();
831 migrate_compress_threads_join();
832 qemu_fclose(s->to_dst_file);
833 s->to_dst_file = NULL;
836 assert((s->state != MIGRATION_STATUS_ACTIVE) &&
837 (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));
839 if (s->state == MIGRATION_STATUS_CANCELLING) {
840 migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
841 MIGRATION_STATUS_CANCELLED);
844 notifier_list_notify(&migration_state_notifiers, s);
847 void migrate_fd_error(MigrationState *s)
849 trace_migrate_fd_error();
850 assert(s->to_dst_file == NULL);
851 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
852 MIGRATION_STATUS_FAILED);
853 notifier_list_notify(&migration_state_notifiers, s);
856 static void migrate_fd_cancel(MigrationState *s)
858 int old_state;
859 QEMUFile *f = migrate_get_current()->to_dst_file;
860 trace_migrate_fd_cancel();
862 if (s->rp_state.from_dst_file) {
863 /* shut down the rp socket, causing the rp thread to exit */
864 qemu_file_shutdown(s->rp_state.from_dst_file);
867 do {
868 old_state = s->state;
869 if (!migration_is_setup_or_active(old_state)) {
870 break;
872 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
873 } while (s->state != MIGRATION_STATUS_CANCELLING);
876 * If we're unlucky the migration code might be stuck somewhere in a
877 * send/write while the network has failed and is waiting to timeout;
878 * if we've got shutdown(2) available then we can force it to quit.
879 * The outgoing qemu file gets closed in migrate_fd_cleanup that is
880 * called in a bh, so there is no race against this cancel.
882 if (s->state == MIGRATION_STATUS_CANCELLING && f) {
883 qemu_file_shutdown(f);
887 void add_migration_state_change_notifier(Notifier *notify)
889 notifier_list_add(&migration_state_notifiers, notify);
892 void remove_migration_state_change_notifier(Notifier *notify)
894 notifier_remove(notify);
897 bool migration_in_setup(MigrationState *s)
899 return s->state == MIGRATION_STATUS_SETUP;
902 bool migration_has_finished(MigrationState *s)
904 return s->state == MIGRATION_STATUS_COMPLETED;
907 bool migration_has_failed(MigrationState *s)
909 return (s->state == MIGRATION_STATUS_CANCELLED ||
910 s->state == MIGRATION_STATUS_FAILED);
913 bool migration_in_postcopy(MigrationState *s)
915 return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
918 bool migration_in_postcopy_after_devices(MigrationState *s)
920 return migration_in_postcopy(s) && s->postcopy_after_devices;
923 MigrationState *migrate_init(const MigrationParams *params)
925 MigrationState *s = migrate_get_current();
928 * Reinitialise all migration state, except
929 * parameters/capabilities that the user set, and
930 * locks.
932 s->bytes_xfer = 0;
933 s->xfer_limit = 0;
934 s->cleanup_bh = 0;
935 s->to_dst_file = NULL;
936 s->state = MIGRATION_STATUS_NONE;
937 s->params = *params;
938 s->rp_state.from_dst_file = NULL;
939 s->rp_state.error = false;
940 s->mbps = 0.0;
941 s->downtime = 0;
942 s->expected_downtime = 0;
943 s->dirty_pages_rate = 0;
944 s->dirty_bytes_rate = 0;
945 s->setup_time = 0;
946 s->dirty_sync_count = 0;
947 s->start_postcopy = false;
948 s->postcopy_after_devices = false;
949 s->migration_thread_running = false;
950 s->last_req_rb = NULL;
952 migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
954 QSIMPLEQ_INIT(&s->src_page_requests);
956 s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
957 return s;
960 static GSList *migration_blockers;
962 void migrate_add_blocker(Error *reason)
964 migration_blockers = g_slist_prepend(migration_blockers, reason);
967 void migrate_del_blocker(Error *reason)
969 migration_blockers = g_slist_remove(migration_blockers, reason);
972 void qmp_migrate_incoming(const char *uri, Error **errp)
974 Error *local_err = NULL;
975 static bool once = true;
977 if (!deferred_incoming) {
978 error_setg(errp, "For use with '-incoming defer'");
979 return;
981 if (!once) {
982 error_setg(errp, "The incoming migration has already been started");
985 qemu_start_incoming_migration(uri, &local_err);
987 if (local_err) {
988 error_propagate(errp, local_err);
989 return;
992 once = false;
995 void qmp_migrate(const char *uri, bool has_blk, bool blk,
996 bool has_inc, bool inc, bool has_detach, bool detach,
997 Error **errp)
999 Error *local_err = NULL;
1000 MigrationState *s = migrate_get_current();
1001 MigrationParams params;
1002 const char *p;
1004 params.blk = has_blk && blk;
1005 params.shared = has_inc && inc;
1007 if (migration_is_setup_or_active(s->state) ||
1008 s->state == MIGRATION_STATUS_CANCELLING) {
1009 error_setg(errp, QERR_MIGRATION_ACTIVE);
1010 return;
1012 if (runstate_check(RUN_STATE_INMIGRATE)) {
1013 error_setg(errp, "Guest is waiting for an incoming migration");
1014 return;
1017 if (qemu_savevm_state_blocked(errp)) {
1018 return;
1021 if (migration_blockers) {
1022 *errp = error_copy(migration_blockers->data);
1023 return;
1026 s = migrate_init(&params);
1028 if (strstart(uri, "tcp:", &p)) {
1029 tcp_start_outgoing_migration(s, p, &local_err);
1030 #ifdef CONFIG_RDMA
1031 } else if (strstart(uri, "rdma:", &p)) {
1032 rdma_start_outgoing_migration(s, p, &local_err);
1033 #endif
1034 #if !defined(WIN32)
1035 } else if (strstart(uri, "exec:", &p)) {
1036 exec_start_outgoing_migration(s, p, &local_err);
1037 } else if (strstart(uri, "unix:", &p)) {
1038 unix_start_outgoing_migration(s, p, &local_err);
1039 } else if (strstart(uri, "fd:", &p)) {
1040 fd_start_outgoing_migration(s, p, &local_err);
1041 #endif
1042 } else {
1043 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
1044 "a valid migration protocol");
1045 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1046 MIGRATION_STATUS_FAILED);
1047 return;
1050 if (local_err) {
1051 migrate_fd_error(s);
1052 error_propagate(errp, local_err);
1053 return;
1057 void qmp_migrate_cancel(Error **errp)
1059 migrate_fd_cancel(migrate_get_current());
1062 void qmp_migrate_set_cache_size(int64_t value, Error **errp)
1064 MigrationState *s = migrate_get_current();
1065 int64_t new_size;
1067 /* Check for truncation */
1068 if (value != (size_t)value) {
1069 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1070 "exceeding address space");
1071 return;
1074 /* Cache should not be larger than guest ram size */
1075 if (value > ram_bytes_total()) {
1076 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1077 "exceeds guest ram size ");
1078 return;
1081 new_size = xbzrle_cache_resize(value);
1082 if (new_size < 0) {
1083 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
1084 "is smaller than page size");
1085 return;
1088 s->xbzrle_cache_size = new_size;
1091 int64_t qmp_query_migrate_cache_size(Error **errp)
1093 return migrate_xbzrle_cache_size();
1096 void qmp_migrate_set_speed(int64_t value, Error **errp)
1098 MigrationState *s;
1100 if (value < 0) {
1101 value = 0;
1103 if (value > SIZE_MAX) {
1104 value = SIZE_MAX;
1107 s = migrate_get_current();
1108 s->bandwidth_limit = value;
1109 if (s->to_dst_file) {
1110 qemu_file_set_rate_limit(s->to_dst_file,
1111 s->bandwidth_limit / XFER_LIMIT_RATIO);
1115 void qmp_migrate_set_downtime(double value, Error **errp)
1117 value *= 1e9;
1118 value = MAX(0, MIN(UINT64_MAX, value));
1119 max_downtime = (uint64_t)value;
1122 bool migrate_postcopy_ram(void)
1124 MigrationState *s;
1126 s = migrate_get_current();
1128 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
1131 bool migrate_auto_converge(void)
1133 MigrationState *s;
1135 s = migrate_get_current();
1137 return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
1140 bool migrate_zero_blocks(void)
1142 MigrationState *s;
1144 s = migrate_get_current();
1146 return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
1149 bool migrate_use_compression(void)
1151 MigrationState *s;
1153 s = migrate_get_current();
1155 return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
1158 int migrate_compress_level(void)
1160 MigrationState *s;
1162 s = migrate_get_current();
1164 return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
1167 int migrate_compress_threads(void)
1169 MigrationState *s;
1171 s = migrate_get_current();
1173 return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
1176 int migrate_decompress_threads(void)
1178 MigrationState *s;
1180 s = migrate_get_current();
1182 return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
1185 bool migrate_use_events(void)
1187 MigrationState *s;
1189 s = migrate_get_current();
1191 return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
1194 int migrate_use_xbzrle(void)
1196 MigrationState *s;
1198 s = migrate_get_current();
1200 return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
1203 int64_t migrate_xbzrle_cache_size(void)
1205 MigrationState *s;
1207 s = migrate_get_current();
1209 return s->xbzrle_cache_size;
1212 /* migration thread support */
1214 * Something bad happened to the RP stream, mark an error
1215 * The caller shall print or trace something to indicate why
1217 static void mark_source_rp_bad(MigrationState *s)
1219 s->rp_state.error = true;
1222 static struct rp_cmd_args {
1223 ssize_t len; /* -1 = variable */
1224 const char *name;
1225 } rp_cmd_args[] = {
1226 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" },
1227 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" },
1228 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" },
1229 [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" },
1230 [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" },
1231 [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" },
1235 * Process a request for pages received on the return path.
1236 * We're allowed to send more than requested (e.g. to round to our page size)
1237 * and we don't need to send pages that have already been sent.
1239 static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
1240 ram_addr_t start, size_t len)
1242 long our_host_ps = getpagesize();
1244 trace_migrate_handle_rp_req_pages(rbname, start, len);
1247 * Since we currently insist on matching page sizes, just sanity check
1248 * we're being asked for whole host pages.
1250 if (start & (our_host_ps-1) ||
1251 (len & (our_host_ps-1))) {
1252 error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
1253 " len: %zd", __func__, start, len);
1254 mark_source_rp_bad(ms);
1255 return;
1258 if (ram_save_queue_pages(ms, rbname, start, len)) {
1259 mark_source_rp_bad(ms);
1264 * Handles messages sent on the return path towards the source VM
1267 static void *source_return_path_thread(void *opaque)
1269 MigrationState *ms = opaque;
1270 QEMUFile *rp = ms->rp_state.from_dst_file;
1271 uint16_t header_len, header_type;
1272 uint8_t buf[512];
1273 uint32_t tmp32, sibling_error;
1274 ram_addr_t start = 0; /* =0 to silence warning */
1275 size_t len = 0, expected_len;
1276 int res;
1278 trace_source_return_path_thread_entry();
1279 while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
1280 migration_is_setup_or_active(ms->state)) {
1281 trace_source_return_path_thread_loop_top();
1282 header_type = qemu_get_be16(rp);
1283 header_len = qemu_get_be16(rp);
1285 if (header_type >= MIG_RP_MSG_MAX ||
1286 header_type == MIG_RP_MSG_INVALID) {
1287 error_report("RP: Received invalid message 0x%04x length 0x%04x",
1288 header_type, header_len);
1289 mark_source_rp_bad(ms);
1290 goto out;
1293 if ((rp_cmd_args[header_type].len != -1 &&
1294 header_len != rp_cmd_args[header_type].len) ||
1295 header_len > sizeof(buf)) {
1296 error_report("RP: Received '%s' message (0x%04x) with"
1297 "incorrect length %d expecting %zu",
1298 rp_cmd_args[header_type].name, header_type, header_len,
1299 (size_t)rp_cmd_args[header_type].len);
1300 mark_source_rp_bad(ms);
1301 goto out;
1304 /* We know we've got a valid header by this point */
1305 res = qemu_get_buffer(rp, buf, header_len);
1306 if (res != header_len) {
1307 error_report("RP: Failed reading data for message 0x%04x"
1308 " read %d expected %d",
1309 header_type, res, header_len);
1310 mark_source_rp_bad(ms);
1311 goto out;
1314 /* OK, we have the message and the data */
1315 switch (header_type) {
1316 case MIG_RP_MSG_SHUT:
1317 sibling_error = be32_to_cpup((uint32_t *)buf);
1318 trace_source_return_path_thread_shut(sibling_error);
1319 if (sibling_error) {
1320 error_report("RP: Sibling indicated error %d", sibling_error);
1321 mark_source_rp_bad(ms);
1324 * We'll let the main thread deal with closing the RP
1325 * we could do a shutdown(2) on it, but we're the only user
1326 * anyway, so there's nothing gained.
1328 goto out;
1330 case MIG_RP_MSG_PONG:
1331 tmp32 = be32_to_cpup((uint32_t *)buf);
1332 trace_source_return_path_thread_pong(tmp32);
1333 break;
1335 case MIG_RP_MSG_REQ_PAGES:
1336 start = be64_to_cpup((uint64_t *)buf);
1337 len = be32_to_cpup((uint32_t *)(buf + 8));
1338 migrate_handle_rp_req_pages(ms, NULL, start, len);
1339 break;
1341 case MIG_RP_MSG_REQ_PAGES_ID:
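            /* Layout: 8 bytes start, 4 bytes len, 1 byte idstr length,
             * then the idstr itself */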
1342 expected_len = 12 + 1; /* header + termination */
1344 if (header_len >= expected_len) {
1345 start = be64_to_cpup((uint64_t *)buf);
1346 len = be32_to_cpup((uint32_t *)(buf + 8));
1347 /* Now we expect an idstr */
1348 tmp32 = buf[12]; /* Length of the following idstr */
1349 buf[13 + tmp32] = '\0';
1350 expected_len += tmp32;
1352 if (header_len != expected_len) {
1353 error_report("RP: Req_Page_id with length %d expecting %zd",
1354 header_len, expected_len);
1355 mark_source_rp_bad(ms);
1356 goto out;
1358 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
1359 break;
1361 default:
1362 break;
1365 if (qemu_file_get_error(rp)) {
1366 trace_source_return_path_thread_bad_end();
1367 mark_source_rp_bad(ms);
1370 trace_source_return_path_thread_end();
1371 out:
1372 ms->rp_state.from_dst_file = NULL;
1373 qemu_fclose(rp);
1374 return NULL;
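/* Open the return path on the outgoing stream and start the thread that
 * services messages arriving on it; returns 0 on success, -1 on failure. */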
1377 static int open_return_path_on_source(MigrationState *ms)
1380 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
1381 if (!ms->rp_state.from_dst_file) {
1382 return -1;
1385 trace_open_return_path_on_source();
1386 qemu_thread_create(&ms->rp_state.rp_thread, "return path",
1387 source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
1389 trace_open_return_path_on_source_continue();
1391 return 0;
1394 /* Returns 0 if the RP was ok, otherwise there was an error on the RP */
1395 static int await_return_path_close_on_source(MigrationState *ms)
1398 * If this is a normal exit then the destination will send a SHUT and the
1399 * rp_thread will exit, however if there's an error we need to cause
1400 * it to exit.
1402 if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
1404 * shutdown(2), if we have it, will cause it to unblock if it's stuck
1405 * waiting for the destination.
1407 qemu_file_shutdown(ms->rp_state.from_dst_file);
1408 mark_source_rp_bad(ms);
1410 trace_await_return_path_close_on_source_joining();
1411 qemu_thread_join(&ms->rp_state.rp_thread);
1412 trace_await_return_path_close_on_source_close();
1413 return ms->rp_state.error;
1417 * Switch from normal iteration to postcopy
1418 * Returns non-0 on error
1420 static int postcopy_start(MigrationState *ms, bool *old_vm_running)
1422 int ret;
1423 const QEMUSizedBuffer *qsb;
1424 int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1425 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
1426 MIGRATION_STATUS_POSTCOPY_ACTIVE);
1428 trace_postcopy_start();
1429 qemu_mutex_lock_iothread();
1430 trace_postcopy_start_set_run();
1432 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
1433 *old_vm_running = runstate_is_running();
1434 global_state_store();
1435 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
1436 if (ret < 0) {
1437 goto fail;
1440 ret = bdrv_inactivate_all();
1441 if (ret < 0) {
1442 goto fail;
1446 * Cause any non-postcopiable, but iterative devices to
1447 * send out their final data.
1449 qemu_savevm_state_complete_precopy(ms->to_dst_file, true);
1452  * In the 'finish migrate' state and with the io-lock held, everything
1453  * should be quiet, but we've potentially still got dirty pages and we
1454  * need to tell the destination to throw away any pages it's already
1455  * received that are dirty
1457 if (ram_postcopy_send_discard_bitmap(ms)) {
1458 error_report("postcopy send discard bitmap failed");
1459 goto fail;
1463 * send rest of state - note things that are doing postcopy
1464 * will notice we're in POSTCOPY_ACTIVE and not actually
1465 * wrap their state up here
1467 qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
1468 /* Ping just for debugging, helps line traces up */
1469 qemu_savevm_send_ping(ms->to_dst_file, 2);
1472 * While loading the device state we may trigger page transfer
1473 * requests and the fd must be free to process those, and thus
1474 * the destination must read the whole device state off the fd before
1475 * it starts processing it. Unfortunately the ad-hoc migration format
1476 * doesn't allow the destination to know the size to read without fully
1477 * parsing it through each devices load-state code (especially the open
1478 * coded devices that use get/put).
1479 * So we wrap the device state up in a package with a length at the start;
1480 * to do this we use a qemu_buf to hold the whole of the device state.
1482 QEMUFile *fb = qemu_bufopen("w", NULL);
1483 if (!fb) {
1484 error_report("Failed to create buffered file");
1485 goto fail;
1489 * Make sure the receiver can get incoming pages before we send the rest
1490 * of the state
1492 qemu_savevm_send_postcopy_listen(fb);
1494 qemu_savevm_state_complete_precopy(fb, false);
1495 qemu_savevm_send_ping(fb, 3);
1497 qemu_savevm_send_postcopy_run(fb);
1499 /* <><> end of stuff going into the package */
1500 qsb = qemu_buf_get(fb);
1502 /* Now send that blob */
1503 if (qemu_savevm_send_packaged(ms->to_dst_file, qsb)) {
1504 goto fail_closefb;
1506 qemu_fclose(fb);
1508 /* Notify listeners so that anything that needs to happen at the transition
1509  * to postcopy, after the device state has been sent, gets a chance to run;
1510  * in particular spice needs to trigger a transition now
1512 ms->postcopy_after_devices = true;
1513 notifier_list_notify(&migration_state_notifiers, ms);
1515 ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;
1517 qemu_mutex_unlock_iothread();
1520 * Although this ping is just for debug, it could potentially be
1521 * used for getting a better measurement of downtime at the source.
1523 qemu_savevm_send_ping(ms->to_dst_file, 4);
1525 ret = qemu_file_get_error(ms->to_dst_file);
1526 if (ret) {
1527 error_report("postcopy_start: Migration stream errored");
1528 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
1529 MIGRATION_STATUS_FAILED);
1532 return ret;
1534 fail_closefb:
1535 qemu_fclose(fb);
1536 fail:
1537 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
1538 MIGRATION_STATUS_FAILED);
1539 qemu_mutex_unlock_iothread();
1540 return -1;
1544 * migration_completion: Used by migration_thread when there's not much left.
1545 * The caller 'breaks' the loop when this returns.
1547 * @s: Current migration state
1548 * @current_active_state: The migration state we expect to be in
1549 * @*old_vm_running: Pointer to old_vm_running flag
1550 * @*start_time: Pointer to time to update
1552 static void migration_completion(MigrationState *s, int current_active_state,
1553 bool *old_vm_running,
1554 int64_t *start_time)
1556 int ret;
1558 if (s->state == MIGRATION_STATUS_ACTIVE) {
1559 qemu_mutex_lock_iothread();
1560 *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1561 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
1562 *old_vm_running = runstate_is_running();
1563 ret = global_state_store();
1565 if (!ret) {
1566 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
1567 if (ret >= 0) {
1568 ret = bdrv_inactivate_all();
1570 if (ret >= 0) {
1571 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
1572 qemu_savevm_state_complete_precopy(s->to_dst_file, false);
1575 qemu_mutex_unlock_iothread();
1577 if (ret < 0) {
1578 goto fail;
1580 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1581 trace_migration_completion_postcopy_end();
1583 qemu_savevm_state_complete_postcopy(s->to_dst_file);
1584 trace_migration_completion_postcopy_end_after_complete();
1588 * If rp was opened we must clean up the thread before
1589 * cleaning everything else up (since if there are no failures
1590  * it will wait for the destination to send its status in
1591  * a SHUT command).
1592  * Postcopy opens rp if enabled (even if it's not activated)
1594 if (migrate_postcopy_ram()) {
1595 int rp_error;
1596 trace_migration_completion_postcopy_end_before_rp();
1597 rp_error = await_return_path_close_on_source(s);
1598 trace_migration_completion_postcopy_end_after_rp(rp_error);
1599 if (rp_error) {
1600 goto fail;
1604 if (qemu_file_get_error(s->to_dst_file)) {
1605 trace_migration_completion_file_err();
1606 goto fail;
1609 migrate_set_state(&s->state, current_active_state,
1610 MIGRATION_STATUS_COMPLETED);
1611 return;
1613 fail:
1614 migrate_set_state(&s->state, current_active_state,
1615 MIGRATION_STATUS_FAILED);
1619 * Master migration thread on the source VM.
1620 * It drives the migration and pumps the data down the outgoing channel.
1622 static void *migration_thread(void *opaque)
1624 MigrationState *s = opaque;
1625 /* Used by the bandwidth calcs, updated later */
1626 int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1627 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
1628 int64_t initial_bytes = 0;
1629 int64_t max_size = 0;
1630 int64_t start_time = initial_time;
1631 int64_t end_time;
1632 bool old_vm_running = false;
1633 bool entered_postcopy = false;
1634 /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
1635 enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
1637 rcu_register_thread();
1639 qemu_savevm_state_header(s->to_dst_file);
1641 if (migrate_postcopy_ram()) {
1642 /* Now tell the dest that it should open its end so it can reply */
1643 qemu_savevm_send_open_return_path(s->to_dst_file);
1645 /* And do a ping that will make stuff easier to debug */
1646 qemu_savevm_send_ping(s->to_dst_file, 1);
1649 * Tell the destination that we *might* want to do postcopy later;
1650 * if the other end can't do postcopy it should fail now, nice and
1651 * early.
1653 qemu_savevm_send_postcopy_advise(s->to_dst_file);
1656 qemu_savevm_state_begin(s->to_dst_file, &s->params);
1658 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
1659 current_active_state = MIGRATION_STATUS_ACTIVE;
1660 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1661 MIGRATION_STATUS_ACTIVE);
1663 trace_migration_thread_setup_complete();
1665 while (s->state == MIGRATION_STATUS_ACTIVE ||
1666 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1667 int64_t current_time;
1668 uint64_t pending_size;
1670 if (!qemu_file_rate_limit(s->to_dst_file)) {
1671 uint64_t pend_post, pend_nonpost;
1673 qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_nonpost,
1674 &pend_post);
1675 pending_size = pend_nonpost + pend_post;
1676 trace_migrate_pending(pending_size, max_size,
1677 pend_post, pend_nonpost);
1678 if (pending_size && pending_size >= max_size) {
1679 /* Still a significant amount to transfer */
1681 if (migrate_postcopy_ram() &&
1682 s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
1683 pend_nonpost <= max_size &&
1684 atomic_read(&s->start_postcopy)) {
1686 if (!postcopy_start(s, &old_vm_running)) {
1687 current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
1688 entered_postcopy = true;
1691 continue;
1693 /* Just another iteration step */
1694 qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
1695 } else {
1696 trace_migration_thread_low_pending(pending_size);
1697 migration_completion(s, current_active_state,
1698 &old_vm_running, &start_time);
1699 break;
1703 if (qemu_file_get_error(s->to_dst_file)) {
1704 migrate_set_state(&s->state, current_active_state,
1705 MIGRATION_STATUS_FAILED);
1706 trace_migration_thread_file_err();
1707 break;
1709 current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1710 if (current_time >= initial_time + BUFFER_DELAY) {
1711 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
1712 initial_bytes;
1713 uint64_t time_spent = current_time - initial_time;
1714 double bandwidth = (double)transferred_bytes / time_spent;
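            /* bandwidth is in bytes/ms and max_downtime is in ns, so dividing
             * by 1e6 gives the number of bytes we can still have pending and
             * expect to send within the allowed downtime. */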
1715 max_size = bandwidth * migrate_max_downtime() / 1000000;
1717 s->mbps = (((double) transferred_bytes * 8.0) /
1718 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
1720 trace_migrate_transferred(transferred_bytes, time_spent,
1721 bandwidth, max_size);
1722 /* if we haven't sent anything, we don't want to recalculate;
1723 10000 is a small enough number for our purposes */
1724 if (s->dirty_bytes_rate && transferred_bytes > 10000) {
1725 s->expected_downtime = s->dirty_bytes_rate / bandwidth;
1728 qemu_file_reset_rate_limit(s->to_dst_file);
1729 initial_time = current_time;
1730 initial_bytes = qemu_ftell(s->to_dst_file);
1732 if (qemu_file_rate_limit(s->to_dst_file)) {
1733 /* usleep expects microseconds */
1734 g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
1738 trace_migration_thread_after_loop();
1739 /* If we enabled cpu throttling for auto-converge, turn it off. */
1740 cpu_throttle_stop();
1741 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1743 qemu_mutex_lock_iothread();
1744 qemu_savevm_state_cleanup();
1745 if (s->state == MIGRATION_STATUS_COMPLETED) {
1746 uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
1747 s->total_time = end_time - s->total_time;
1748 if (!entered_postcopy) {
1749 s->downtime = end_time - start_time;
1751 if (s->total_time) {
1752 s->mbps = (((double) transferred_bytes * 8.0) /
1753 ((double) s->total_time)) / 1000;
1755 runstate_set(RUN_STATE_POSTMIGRATE);
1756 } else {
1757 if (old_vm_running && !entered_postcopy) {
1758 vm_start();
1761 qemu_bh_schedule(s->cleanup_bh);
1762 qemu_mutex_unlock_iothread();
1764 rcu_unregister_thread();
1765 return NULL;
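/* Called once the outgoing transport has been set up: arms the cleanup bottom
 * half and the rate limit, opens the return path when postcopy is enabled, and
 * starts the migration thread. */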
1768 void migrate_fd_connect(MigrationState *s)
1770 /* This is a best first approximation; convert ns to ms */
1771 s->expected_downtime = max_downtime/1000000;
1772 s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
1774 qemu_file_set_rate_limit(s->to_dst_file,
1775 s->bandwidth_limit / XFER_LIMIT_RATIO);
1777 /* Notify before starting migration thread */
1778 notifier_list_notify(&migration_state_notifiers, s);
1781 * Open the return path; currently for postcopy but other things might
1782 * also want it.
1784 if (migrate_postcopy_ram()) {
1785 if (open_return_path_on_source(s)) {
1786 error_report("Unable to open return-path for postcopy");
1787 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1788 MIGRATION_STATUS_FAILED);
1789 migrate_fd_cleanup(s);
1790 return;
1794 migrate_compress_threads_create();
1795 qemu_thread_create(&s->thread, "migration", migration_thread, s,
1796 QEMU_THREAD_JOINABLE);
1797 s->migration_thread_running = true;
1800 PostcopyState postcopy_state_get(void)
1802 return atomic_mb_read(&incoming_postcopy_state);
1805 /* Set the state and return the old state */
1806 PostcopyState postcopy_state_set(PostcopyState new_state)
1808 return atomic_xchg(&incoming_postcopy_state, new_state);