1 /*
2 * QEMU live migration
4 * Copyright IBM, Corp. 2008
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
16 #include "qemu/osdep.h"
17 #include "qemu/cutils.h"
18 #include "qemu/error-report.h"
19 #include "qemu/main-loop.h"
20 #include "migration/blocker.h"
21 #include "exec.h"
22 #include "fd.h"
23 #include "socket.h"
24 #include "sysemu/runstate.h"
25 #include "sysemu/sysemu.h"
26 #include "sysemu/cpu-throttle.h"
27 #include "rdma.h"
28 #include "ram.h"
29 #include "migration/global_state.h"
30 #include "migration/misc.h"
31 #include "migration.h"
32 #include "savevm.h"
33 #include "qemu-file-channel.h"
34 #include "qemu-file.h"
35 #include "migration/vmstate.h"
36 #include "block/block.h"
37 #include "qapi/error.h"
38 #include "qapi/clone-visitor.h"
39 #include "qapi/qapi-visit-migration.h"
40 #include "qapi/qapi-visit-sockets.h"
41 #include "qapi/qapi-commands-migration.h"
42 #include "qapi/qapi-events-migration.h"
43 #include "qapi/qmp/qerror.h"
44 #include "qapi/qmp/qnull.h"
45 #include "qemu/rcu.h"
46 #include "block.h"
47 #include "postcopy-ram.h"
48 #include "qemu/thread.h"
49 #include "trace.h"
50 #include "exec/target_page.h"
51 #include "io/channel-buffer.h"
52 #include "migration/colo.h"
53 #include "hw/boards.h"
54 #include "hw/qdev-properties.h"
55 #include "hw/qdev-properties-system.h"
56 #include "monitor/monitor.h"
57 #include "net/announce.h"
58 #include "qemu/queue.h"
59 #include "multifd.h"
60 #include "qemu/yank.h"
61 #include "sysemu/cpus.h"
62 #include "yank_functions.h"
63 #include "sysemu/qtest.h"
65 #define MAX_THROTTLE (128 << 20) /* Migration transfer speed throttling */
67 /* Amount of time to allocate to each "chunk" of bandwidth-throttled
68 * data. */
69 #define BUFFER_DELAY 100
70 #define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
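/*
 * A rough worked example of the chunked rate limiting above (a sketch,
 * not authoritative): the bandwidth cap is applied per BUFFER_DELAY
 * chunk, which is why migrate_params_apply() divides max_bandwidth by
 * XFER_LIMIT_RATIO when calling qemu_file_set_rate_limit().  With a cap
 * of 1 GiB/s:
 *
 *     XFER_LIMIT_RATIO = 1000 / 100 = 10 chunks per second
 *     per-chunk budget = 1073741824 / 10 ~= 107 MB every 100 ms
 */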
72 /* Time in milliseconds we are allowed to stop the source,
73 * for sending the last part */
74 #define DEFAULT_MIGRATE_SET_DOWNTIME 300
76 /* Maximum migrate downtime set to 2000 seconds */
77 #define MAX_MIGRATE_DOWNTIME_SECONDS 2000
78 #define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000)
80 /* Default compression thread count */
81 #define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
 82 /* Default decompression thread count; usually decompression is at
 83  * least 4 times as fast as compression. */
84 #define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
 85 /* 0: no compression, 1: best speed, ... 9: best compression ratio */
86 #define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
87 /* Define default autoconverge cpu throttle migration parameters */
88 #define DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD 50
89 #define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
90 #define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10
91 #define DEFAULT_MIGRATE_MAX_CPU_THROTTLE 99
93 /* Migration XBZRLE default cache size */
94 #define DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE (64 * 1024 * 1024)
96 /* The delay time (in ms) between two COLO checkpoints */
97 #define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY (200 * 100)
98 #define DEFAULT_MIGRATE_MULTIFD_CHANNELS 2
99 #define DEFAULT_MIGRATE_MULTIFD_COMPRESSION MULTIFD_COMPRESSION_NONE
 100 /* 0: no compression, 1: best speed, ... 9: best compression ratio */
101 #define DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL 1
 102 /* 0: no compression, 1: best speed, ... 20: best compression ratio */
103 #define DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL 1
105 /* Background transfer rate for postcopy, 0 means unlimited, note
106 * that page requests can still exceed this limit.
108 #define DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH 0
111 * Parameters for self_announce_delay giving a stream of RARP/ARP
112 * packets after migration.
114 #define DEFAULT_MIGRATE_ANNOUNCE_INITIAL 50
115 #define DEFAULT_MIGRATE_ANNOUNCE_MAX 550
116 #define DEFAULT_MIGRATE_ANNOUNCE_ROUNDS 5
117 #define DEFAULT_MIGRATE_ANNOUNCE_STEP 100
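/*
 * A rough reading of these defaults (the announce schedule itself is
 * implemented by the announce timer in net/announce.c, so treat this as
 * a sketch): the first RARP/ARP burst goes out about ANNOUNCE_INITIAL
 * (50) ms after the VM starts running on the destination, the gap
 * between bursts then grows by ANNOUNCE_STEP (100) ms per round up to
 * ANNOUNCE_MAX (550) ms, and ANNOUNCE_ROUNDS (5) bursts are sent in
 * total.
 */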
119 static NotifierList migration_state_notifiers =
120 NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
122 /* Messages sent on the return path from destination to source */
123 enum mig_rp_message_type {
124 MIG_RP_MSG_INVALID = 0, /* Must be 0 */
125 MIG_RP_MSG_SHUT, /* sibling will not send any more RP messages */
126 MIG_RP_MSG_PONG, /* Response to a PING; data (seq: be32 ) */
128 MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
129 MIG_RP_MSG_REQ_PAGES, /* data (start: be64, len: be32) */
130 MIG_RP_MSG_RECV_BITMAP, /* send recved_bitmap back to source */
131 MIG_RP_MSG_RESUME_ACK, /* tell source that we are ready to resume */
133 MIG_RP_MSG_MAX
136 /* Migration capabilities set */
137 struct MigrateCapsSet {
138 int size; /* Capability set size */
139 MigrationCapability caps[]; /* Variadic array of capabilities */
141 typedef struct MigrateCapsSet MigrateCapsSet;
143 /* Define and initialize MigrateCapsSet */
144 #define INITIALIZE_MIGRATE_CAPS_SET(_name, ...) \
145 MigrateCapsSet _name = { \
146 .size = sizeof((int []) { __VA_ARGS__ }) / sizeof(int), \
147 .caps = { __VA_ARGS__ } \
150 /* Background-snapshot compatibility check list */
151 static const
152 INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot,
153 MIGRATION_CAPABILITY_POSTCOPY_RAM,
154 MIGRATION_CAPABILITY_DIRTY_BITMAPS,
155 MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME,
156 MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE,
157 MIGRATION_CAPABILITY_RETURN_PATH,
158 MIGRATION_CAPABILITY_MULTIFD,
159 MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER,
160 MIGRATION_CAPABILITY_AUTO_CONVERGE,
161 MIGRATION_CAPABILITY_RELEASE_RAM,
162 MIGRATION_CAPABILITY_RDMA_PIN_ALL,
163 MIGRATION_CAPABILITY_COMPRESS,
164 MIGRATION_CAPABILITY_XBZRLE,
165 MIGRATION_CAPABILITY_X_COLO,
166 MIGRATION_CAPABILITY_VALIDATE_UUID);
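/*
 * A short illustration of the macro above: the compound-literal sizeof
 * trick counts the variadic arguments, so .size never needs manual
 * maintenance.  For a hypothetical set (example_caps is not a real
 * symbol in this file):
 *
 *     static const INITIALIZE_MIGRATE_CAPS_SET(example_caps,
 *         MIGRATION_CAPABILITY_XBZRLE,
 *         MIGRATION_CAPABILITY_MULTIFD);
 *
 *     example_caps.size == sizeof((int []) { ... }) / sizeof(int) == 2
 */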
168 /* When we add fault tolerance, we could have several
169 migrations at once. For now we don't need to add
 170    dynamic creation of migration objects. */
172 static MigrationState *current_migration;
173 static MigrationIncomingState *current_incoming;
175 static GSList *migration_blockers;
177 static bool migration_object_check(MigrationState *ms, Error **errp);
178 static int migration_maybe_pause(MigrationState *s,
179 int *current_active_state,
180 int new_state);
181 static void migrate_fd_cancel(MigrationState *s);
183 static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp)
185 uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp;
187 return (a > b) - (a < b);
190 void migration_object_init(void)
192 /* This can only be called once. */
193 assert(!current_migration);
194 current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));
197 * Init the migrate incoming object as well no matter whether
198 * we'll use it or not.
200 assert(!current_incoming);
201 current_incoming = g_new0(MigrationIncomingState, 1);
202 current_incoming->state = MIGRATION_STATUS_NONE;
203 current_incoming->postcopy_remote_fds =
204 g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD));
205 qemu_mutex_init(&current_incoming->rp_mutex);
206 qemu_event_init(&current_incoming->main_thread_load_event, false);
207 qemu_sem_init(&current_incoming->postcopy_pause_sem_dst, 0);
208 qemu_sem_init(&current_incoming->postcopy_pause_sem_fault, 0);
209 qemu_mutex_init(&current_incoming->page_request_mutex);
210 current_incoming->page_requested = g_tree_new(page_request_addr_cmp);
212 migration_object_check(current_migration, &error_fatal);
214 blk_mig_init();
215 ram_mig_init();
216 dirty_bitmap_mig_init();
219 void migration_cancel(const Error *error)
221 if (error) {
222 migrate_set_error(current_migration, error);
224 migrate_fd_cancel(current_migration);
227 void migration_shutdown(void)
 230      * When the QEMU main thread exits, the COLO thread
 231      * may be waiting on a semaphore. So we should wake up the
 232      * COLO thread before migration shutdown.
234 colo_shutdown();
236 * Cancel the current migration - that will (eventually)
237 * stop the migration using this structure
239 migration_cancel(NULL);
240 object_unref(OBJECT(current_migration));
243 * Cancel outgoing migration of dirty bitmaps. It should
244 * at least unref used block nodes.
246 dirty_bitmap_mig_cancel_outgoing();
249 * Cancel incoming migration of dirty bitmaps. Dirty bitmaps
 250      * are non-critical data, and their loss is never considered
 251      * something serious.
253 dirty_bitmap_mig_cancel_incoming();
256 /* For outgoing */
257 MigrationState *migrate_get_current(void)
 259     /* This can only be called after the object has been created. */
260 assert(current_migration);
261 return current_migration;
264 MigrationIncomingState *migration_incoming_get_current(void)
266 assert(current_incoming);
267 return current_incoming;
270 void migration_incoming_state_destroy(void)
272 struct MigrationIncomingState *mis = migration_incoming_get_current();
274 if (mis->to_src_file) {
275 /* Tell source that we are done */
276 migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
277 qemu_fclose(mis->to_src_file);
278 mis->to_src_file = NULL;
281 if (mis->from_src_file) {
282 migration_ioc_unregister_yank_from_file(mis->from_src_file);
283 qemu_fclose(mis->from_src_file);
284 mis->from_src_file = NULL;
286 if (mis->postcopy_remote_fds) {
287 g_array_free(mis->postcopy_remote_fds, TRUE);
288 mis->postcopy_remote_fds = NULL;
290 if (mis->transport_cleanup) {
291 mis->transport_cleanup(mis->transport_data);
294 qemu_event_reset(&mis->main_thread_load_event);
296 if (mis->page_requested) {
297 g_tree_destroy(mis->page_requested);
298 mis->page_requested = NULL;
301 if (mis->socket_address_list) {
302 qapi_free_SocketAddressList(mis->socket_address_list);
303 mis->socket_address_list = NULL;
306 yank_unregister_instance(MIGRATION_YANK_INSTANCE);
309 static void migrate_generate_event(int new_state)
311 if (migrate_use_events()) {
312 qapi_event_send_migration(new_state);
316 static bool migrate_late_block_activate(void)
318 MigrationState *s;
320 s = migrate_get_current();
322 return s->enabled_capabilities[
323 MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE];
327 * Send a message on the return channel back to the source
328 * of the migration.
330 static int migrate_send_rp_message(MigrationIncomingState *mis,
331 enum mig_rp_message_type message_type,
332 uint16_t len, void *data)
334 int ret = 0;
336 trace_migrate_send_rp_message((int)message_type, len);
337 QEMU_LOCK_GUARD(&mis->rp_mutex);
340 * It's possible that the file handle got lost due to network
341 * failures.
343 if (!mis->to_src_file) {
344 ret = -EIO;
345 return ret;
348 qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
349 qemu_put_be16(mis->to_src_file, len);
350 qemu_put_buffer(mis->to_src_file, data, len);
351 qemu_fflush(mis->to_src_file);
 353     /* It's possible that the qemu file hit an error during sending */
354 ret = qemu_file_get_error(mis->to_src_file);
356 return ret;
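/*
 * For reference, every return-path message framed by
 * migrate_send_rp_message() above has the same on-the-wire shape:
 *
 *     be16 message type | be16 payload length | payload bytes
 *
 * e.g. a MIG_RP_MSG_PONG carrying the be32 value 42 is the 8 bytes
 *     00 02 | 00 04 | 00 00 00 2a
 */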
359 /* Request one page from the source VM at the given start address.
360 * rb: the RAMBlock to request the page in
 361  * start: Address offset within the RB
 362  * len: Length in bytes required - must be a multiple of pagesize
364 int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
365 RAMBlock *rb, ram_addr_t start)
367 uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
368 size_t msglen = 12; /* start + len */
369 size_t len = qemu_ram_pagesize(rb);
370 enum mig_rp_message_type msg_type;
371 const char *rbname;
372 int rbname_len;
374 *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
375 *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);
 378      * We maintain the last ramblock that we requested a page for. Note that we
379 * don't need locking because this function will only be called within the
380 * postcopy ram fault thread.
382 if (rb != mis->last_rb) {
383 mis->last_rb = rb;
385 rbname = qemu_ram_get_idstr(rb);
386 rbname_len = strlen(rbname);
388 assert(rbname_len < 256);
390 bufc[msglen++] = rbname_len;
391 memcpy(bufc + msglen, rbname, rbname_len);
392 msglen += rbname_len;
393 msg_type = MIG_RP_MSG_REQ_PAGES_ID;
394 } else {
395 msg_type = MIG_RP_MSG_REQ_PAGES;
398 return migrate_send_rp_message(mis, msg_type, msglen, bufc);
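/*
 * For reference, the payload assembled in bufc[] above is laid out as
 * (byte offsets):
 *
 *     0..7   be64 start       page-aligned offset inside the RAMBlock
 *     8..11  be32 len         request length (one target page)
 *     12     u8   rbname_len  \ only present for MIG_RP_MSG_REQ_PAGES_ID,
 *     13..   rbname           / i.e. when the RAMBlock changed since the
 *                               previous request
 */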
401 int migrate_send_rp_req_pages(MigrationIncomingState *mis,
402 RAMBlock *rb, ram_addr_t start, uint64_t haddr)
404 void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
405 bool received = false;
407 WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
408 received = ramblock_recv_bitmap_test_byte_offset(rb, start);
409 if (!received && !g_tree_lookup(mis->page_requested, aligned)) {
411 * The page has not been received, and it's not yet in the page
412 * request list. Queue it. Set the value of element to 1, so that
413 * things like g_tree_lookup() will return TRUE (1) when found.
415 g_tree_insert(mis->page_requested, aligned, (gpointer)1);
416 mis->page_requested_count++;
417 trace_postcopy_page_req_add(aligned, mis->page_requested_count);
422 * If the page is there, skip sending the message. We don't even need the
423 * lock because as long as the page arrived, it'll be there forever.
425 if (received) {
426 return 0;
429 return migrate_send_rp_message_req_pages(mis, rb, start);
432 static bool migration_colo_enabled;
433 bool migration_incoming_colo_enabled(void)
435 return migration_colo_enabled;
438 void migration_incoming_disable_colo(void)
440 ram_block_discard_disable(false);
441 migration_colo_enabled = false;
444 int migration_incoming_enable_colo(void)
446 if (ram_block_discard_disable(true)) {
447 error_report("COLO: cannot disable RAM discard");
448 return -EBUSY;
450 migration_colo_enabled = true;
451 return 0;
454 void migrate_add_address(SocketAddress *address)
456 MigrationIncomingState *mis = migration_incoming_get_current();
458 QAPI_LIST_PREPEND(mis->socket_address_list,
459 QAPI_CLONE(SocketAddress, address));
462 static void qemu_start_incoming_migration(const char *uri, Error **errp)
464 const char *p = NULL;
466 migrate_protocol_allow_multifd(false); /* reset it anyway */
467 qapi_event_send_migration(MIGRATION_STATUS_SETUP);
468 if (strstart(uri, "tcp:", &p) ||
469 strstart(uri, "unix:", NULL) ||
470 strstart(uri, "vsock:", NULL)) {
471 migrate_protocol_allow_multifd(true);
472 socket_start_incoming_migration(p ? p : uri, errp);
473 #ifdef CONFIG_RDMA
474 } else if (strstart(uri, "rdma:", &p)) {
475 rdma_start_incoming_migration(p, errp);
476 #endif
477 } else if (strstart(uri, "exec:", &p)) {
478 exec_start_incoming_migration(p, errp);
479 } else if (strstart(uri, "fd:", &p)) {
480 fd_start_incoming_migration(p, errp);
481 } else {
482 error_setg(errp, "unknown migration protocol: %s", uri);
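/*
 * The URI prefixes matched above correspond to the usual '-incoming'
 * forms; roughly (exact syntax is handled by the respective transport):
 *
 *     tcp:<host>:<port>     socket transport (multifd capable)
 *     unix:<path>           socket transport over a UNIX socket
 *     vsock:<cid>:<port>    socket transport over vsock
 *     rdma:<host>:<port>    RDMA transport (only with CONFIG_RDMA)
 *     exec:<command>        pipe the migration stream through a command
 *     fd:<number>           an already-open file descriptor
 */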
486 static void process_incoming_migration_bh(void *opaque)
488 Error *local_err = NULL;
489 MigrationIncomingState *mis = opaque;
491 /* If capability late_block_activate is set:
492 * Only fire up the block code now if we're going to restart the
493 * VM, else 'cont' will do it.
494 * This causes file locking to happen; so we don't want it to happen
495 * unless we really are starting the VM.
497 if (!migrate_late_block_activate() ||
498 (autostart && (!global_state_received() ||
499 global_state_get_runstate() == RUN_STATE_RUNNING))) {
500 /* Make sure all file formats flush their mutable metadata.
501 * If we get an error here, just don't restart the VM yet. */
502 bdrv_invalidate_cache_all(&local_err);
503 if (local_err) {
504 error_report_err(local_err);
505 local_err = NULL;
506 autostart = false;
511 * This must happen after all error conditions are dealt with and
512 * we're sure the VM is going to be running on this host.
514 qemu_announce_self(&mis->announce_timer, migrate_announce_params());
516 if (multifd_load_cleanup(&local_err) != 0) {
517 error_report_err(local_err);
518 autostart = false;
 520     /* If the global state section was not received or we are in the running
521 state, we need to obey autostart. Any other state is set with
522 runstate_set. */
524 dirty_bitmap_mig_before_vm_start();
526 if (!global_state_received() ||
527 global_state_get_runstate() == RUN_STATE_RUNNING) {
528 if (autostart) {
529 vm_start();
530 } else {
531 runstate_set(RUN_STATE_PAUSED);
533 } else if (migration_incoming_colo_enabled()) {
534 migration_incoming_disable_colo();
535 vm_start();
536 } else {
537 runstate_set(global_state_get_runstate());
540 * This must happen after any state changes since as soon as an external
541 * observer sees this event they might start to prod at the VM assuming
542 * it's ready to use.
544 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
545 MIGRATION_STATUS_COMPLETED);
546 qemu_bh_delete(mis->bh);
547 migration_incoming_state_destroy();
550 static void process_incoming_migration_co(void *opaque)
552 MigrationIncomingState *mis = migration_incoming_get_current();
553 PostcopyState ps;
554 int ret;
555 Error *local_err = NULL;
557 assert(mis->from_src_file);
558 mis->migration_incoming_co = qemu_coroutine_self();
559 mis->largest_page_size = qemu_ram_pagesize_largest();
560 postcopy_state_set(POSTCOPY_INCOMING_NONE);
561 migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
562 MIGRATION_STATUS_ACTIVE);
563 ret = qemu_loadvm_state(mis->from_src_file);
565 ps = postcopy_state_get();
566 trace_process_incoming_migration_co_end(ret, ps);
567 if (ps != POSTCOPY_INCOMING_NONE) {
568 if (ps == POSTCOPY_INCOMING_ADVISE) {
570 * Where a migration had postcopy enabled (and thus went to advise)
571 * but managed to complete within the precopy period, we can use
572 * the normal exit.
574 postcopy_ram_incoming_cleanup(mis);
575 } else if (ret >= 0) {
577 * Postcopy was started, cleanup should happen at the end of the
578 * postcopy thread.
580 trace_process_incoming_migration_co_postcopy_end_main();
581 return;
583 /* Else if something went wrong then just fall out of the normal exit */
586 /* we get COLO info, and know if we are in COLO mode */
587 if (!ret && migration_incoming_colo_enabled()) {
588 /* Make sure all file formats flush their mutable metadata */
589 bdrv_invalidate_cache_all(&local_err);
590 if (local_err) {
591 error_report_err(local_err);
592 goto fail;
595 qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
596 colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
597 mis->have_colo_incoming_thread = true;
598 qemu_coroutine_yield();
600 qemu_mutex_unlock_iothread();
 601         /* Wait for the COLO incoming thread to exit before freeing resources */
602 qemu_thread_join(&mis->colo_incoming_thread);
603 qemu_mutex_lock_iothread();
604 /* We hold the global iothread lock, so it is safe here */
605 colo_release_ram_cache();
608 if (ret < 0) {
609 error_report("load of migration failed: %s", strerror(-ret));
610 goto fail;
612 mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
613 qemu_bh_schedule(mis->bh);
614 mis->migration_incoming_co = NULL;
615 return;
616 fail:
617 local_err = NULL;
618 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
619 MIGRATION_STATUS_FAILED);
620 qemu_fclose(mis->from_src_file);
621 if (multifd_load_cleanup(&local_err) != 0) {
622 error_report_err(local_err);
624 exit(EXIT_FAILURE);
628 * migration_incoming_setup: Setup incoming migration
629 * @f: file for main migration channel
630 * @errp: where to put errors
632 * Returns: %true on success, %false on error.
634 static bool migration_incoming_setup(QEMUFile *f, Error **errp)
636 MigrationIncomingState *mis = migration_incoming_get_current();
638 if (multifd_load_setup(errp) != 0) {
639 return false;
642 if (!mis->from_src_file) {
643 mis->from_src_file = f;
645 qemu_file_set_blocking(f, false);
646 return true;
649 void migration_incoming_process(void)
651 Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);
652 qemu_coroutine_enter(co);
655 /* Returns true if recovered from a paused migration, otherwise false */
656 static bool postcopy_try_recover(QEMUFile *f)
658 MigrationIncomingState *mis = migration_incoming_get_current();
660 if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
661 /* Resumed from a paused postcopy migration */
663 mis->from_src_file = f;
664 /* Postcopy has standalone thread to do vm load */
665 qemu_file_set_blocking(f, true);
667 /* Re-configure the return path */
668 mis->to_src_file = qemu_file_get_return_path(f);
670 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
671 MIGRATION_STATUS_POSTCOPY_RECOVER);
674 * Here, we only wake up the main loading thread (while the
675 * fault thread will still be waiting), so that we can receive
 676          * commands from the source now, and answer them if needed. The
 677          * fault thread will only be woken up later, once we are sure
 678          * that the source is ready to reply to page requests.
680 qemu_sem_post(&mis->postcopy_pause_sem_dst);
681 return true;
684 return false;
687 void migration_fd_process_incoming(QEMUFile *f, Error **errp)
689 if (postcopy_try_recover(f)) {
690 return;
693 if (!migration_incoming_setup(f, errp)) {
694 return;
696 migration_incoming_process();
699 void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
701 MigrationIncomingState *mis = migration_incoming_get_current();
702 Error *local_err = NULL;
703 bool start_migration;
705 if (!mis->from_src_file) {
706 /* The first connection (multifd may have multiple) */
707 QEMUFile *f = qemu_fopen_channel_input(ioc);
709 /* If it's a recovery, we're done */
710 if (postcopy_try_recover(f)) {
711 return;
714 if (!migration_incoming_setup(f, errp)) {
715 return;
719 * Common migration only needs one channel, so we can start
720 * right now. Multifd needs more than one channel, we wait.
722 start_migration = !migrate_use_multifd();
723 } else {
724 /* Multiple connections */
725 assert(migrate_use_multifd());
726 start_migration = multifd_recv_new_channel(ioc, &local_err);
727 if (local_err) {
728 error_propagate(errp, local_err);
729 return;
733 if (start_migration) {
734 migration_incoming_process();
739 * @migration_has_all_channels: We have received all channels that we need
741 * Returns true when we have got connections to all the channels that
742 * we need for migration.
744 bool migration_has_all_channels(void)
746 MigrationIncomingState *mis = migration_incoming_get_current();
747 bool all_channels;
749 all_channels = multifd_recv_all_channels_created();
751 return all_channels && mis->from_src_file != NULL;
755 * Send a 'SHUT' message on the return channel with the given value
756 * to indicate that we've finished with the RP. Non-0 value indicates
757 * error.
759 void migrate_send_rp_shut(MigrationIncomingState *mis,
760 uint32_t value)
762 uint32_t buf;
764 buf = cpu_to_be32(value);
765 migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
769 * Send a 'PONG' message on the return channel with the given value
770 * (normally in response to a 'PING')
772 void migrate_send_rp_pong(MigrationIncomingState *mis,
773 uint32_t value)
775 uint32_t buf;
777 buf = cpu_to_be32(value);
778 migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
781 void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
782 char *block_name)
784 char buf[512];
785 int len;
786 int64_t res;
789 * First, we send the header part. It contains only the len of
790 * idstr, and the idstr itself.
792 len = strlen(block_name);
793 buf[0] = len;
794 memcpy(buf + 1, block_name, len);
796 if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
797 error_report("%s: MSG_RP_RECV_BITMAP only used for recovery",
798 __func__);
799 return;
802 migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf);
805 * Next, we dump the received bitmap to the stream.
807 * TODO: currently we are safe since we are the only one that is
808 * using the to_src_file handle (fault thread is still paused),
 809      * and it's ok even without taking the mutex. However, the best way is
810 * to take the lock before sending the message header, and release
811 * the lock after sending the bitmap.
813 qemu_mutex_lock(&mis->rp_mutex);
814 res = ramblock_recv_bitmap_send(mis->to_src_file, block_name);
815 qemu_mutex_unlock(&mis->rp_mutex);
817 trace_migrate_send_rp_recv_bitmap(block_name, res);
820 void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
822 uint32_t buf;
824 buf = cpu_to_be32(value);
825 migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
828 MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
830 MigrationCapabilityStatusList *head = NULL, **tail = &head;
831 MigrationCapabilityStatus *caps;
832 MigrationState *s = migrate_get_current();
833 int i;
835 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
836 #ifndef CONFIG_LIVE_BLOCK_MIGRATION
837 if (i == MIGRATION_CAPABILITY_BLOCK) {
838 continue;
840 #endif
841 caps = g_malloc0(sizeof(*caps));
842 caps->capability = i;
843 caps->state = s->enabled_capabilities[i];
844 QAPI_LIST_APPEND(tail, caps);
847 return head;
850 MigrationParameters *qmp_query_migrate_parameters(Error **errp)
852 MigrationParameters *params;
853 MigrationState *s = migrate_get_current();
855 /* TODO use QAPI_CLONE() instead of duplicating it inline */
856 params = g_malloc0(sizeof(*params));
857 params->has_compress_level = true;
858 params->compress_level = s->parameters.compress_level;
859 params->has_compress_threads = true;
860 params->compress_threads = s->parameters.compress_threads;
861 params->has_compress_wait_thread = true;
862 params->compress_wait_thread = s->parameters.compress_wait_thread;
863 params->has_decompress_threads = true;
864 params->decompress_threads = s->parameters.decompress_threads;
865 params->has_throttle_trigger_threshold = true;
866 params->throttle_trigger_threshold = s->parameters.throttle_trigger_threshold;
867 params->has_cpu_throttle_initial = true;
868 params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
869 params->has_cpu_throttle_increment = true;
870 params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
871 params->has_cpu_throttle_tailslow = true;
872 params->cpu_throttle_tailslow = s->parameters.cpu_throttle_tailslow;
873 params->has_tls_creds = true;
874 params->tls_creds = g_strdup(s->parameters.tls_creds);
875 params->has_tls_hostname = true;
876 params->tls_hostname = g_strdup(s->parameters.tls_hostname);
877 params->has_tls_authz = true;
878 params->tls_authz = g_strdup(s->parameters.tls_authz ?
879 s->parameters.tls_authz : "");
880 params->has_max_bandwidth = true;
881 params->max_bandwidth = s->parameters.max_bandwidth;
882 params->has_downtime_limit = true;
883 params->downtime_limit = s->parameters.downtime_limit;
884 params->has_x_checkpoint_delay = true;
885 params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;
886 params->has_block_incremental = true;
887 params->block_incremental = s->parameters.block_incremental;
888 params->has_multifd_channels = true;
889 params->multifd_channels = s->parameters.multifd_channels;
890 params->has_multifd_compression = true;
891 params->multifd_compression = s->parameters.multifd_compression;
892 params->has_multifd_zlib_level = true;
893 params->multifd_zlib_level = s->parameters.multifd_zlib_level;
894 params->has_multifd_zstd_level = true;
895 params->multifd_zstd_level = s->parameters.multifd_zstd_level;
896 params->has_xbzrle_cache_size = true;
897 params->xbzrle_cache_size = s->parameters.xbzrle_cache_size;
898 params->has_max_postcopy_bandwidth = true;
899 params->max_postcopy_bandwidth = s->parameters.max_postcopy_bandwidth;
900 params->has_max_cpu_throttle = true;
901 params->max_cpu_throttle = s->parameters.max_cpu_throttle;
902 params->has_announce_initial = true;
903 params->announce_initial = s->parameters.announce_initial;
904 params->has_announce_max = true;
905 params->announce_max = s->parameters.announce_max;
906 params->has_announce_rounds = true;
907 params->announce_rounds = s->parameters.announce_rounds;
908 params->has_announce_step = true;
909 params->announce_step = s->parameters.announce_step;
911 if (s->parameters.has_block_bitmap_mapping) {
912 params->has_block_bitmap_mapping = true;
913 params->block_bitmap_mapping =
914 QAPI_CLONE(BitmapMigrationNodeAliasList,
915 s->parameters.block_bitmap_mapping);
918 return params;
921 AnnounceParameters *migrate_announce_params(void)
923 static AnnounceParameters ap;
925 MigrationState *s = migrate_get_current();
927 ap.initial = s->parameters.announce_initial;
928 ap.max = s->parameters.announce_max;
929 ap.rounds = s->parameters.announce_rounds;
930 ap.step = s->parameters.announce_step;
932 return &ap;
936 * Return true if we're already in the middle of a migration
937 * (i.e. any of the active or setup states)
939 bool migration_is_setup_or_active(int state)
941 switch (state) {
942 case MIGRATION_STATUS_ACTIVE:
943 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
944 case MIGRATION_STATUS_POSTCOPY_PAUSED:
945 case MIGRATION_STATUS_POSTCOPY_RECOVER:
946 case MIGRATION_STATUS_SETUP:
947 case MIGRATION_STATUS_PRE_SWITCHOVER:
948 case MIGRATION_STATUS_DEVICE:
949 case MIGRATION_STATUS_WAIT_UNPLUG:
950 case MIGRATION_STATUS_COLO:
951 return true;
953 default:
954 return false;
959 bool migration_is_running(int state)
961 switch (state) {
962 case MIGRATION_STATUS_ACTIVE:
963 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
964 case MIGRATION_STATUS_POSTCOPY_PAUSED:
965 case MIGRATION_STATUS_POSTCOPY_RECOVER:
966 case MIGRATION_STATUS_SETUP:
967 case MIGRATION_STATUS_PRE_SWITCHOVER:
968 case MIGRATION_STATUS_DEVICE:
969 case MIGRATION_STATUS_WAIT_UNPLUG:
970 case MIGRATION_STATUS_CANCELLING:
971 return true;
973 default:
974 return false;
979 static void populate_time_info(MigrationInfo *info, MigrationState *s)
981 info->has_status = true;
982 info->has_setup_time = true;
983 info->setup_time = s->setup_time;
984 if (s->state == MIGRATION_STATUS_COMPLETED) {
985 info->has_total_time = true;
986 info->total_time = s->total_time;
987 info->has_downtime = true;
988 info->downtime = s->downtime;
989 } else {
990 info->has_total_time = true;
991 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
992 s->start_time;
993 info->has_expected_downtime = true;
994 info->expected_downtime = s->expected_downtime;
998 static void populate_ram_info(MigrationInfo *info, MigrationState *s)
1000 size_t page_size = qemu_target_page_size();
1002 info->has_ram = true;
1003 info->ram = g_malloc0(sizeof(*info->ram));
1004 info->ram->transferred = ram_counters.transferred;
1005 info->ram->total = ram_bytes_total();
1006 info->ram->duplicate = ram_counters.duplicate;
1007 /* legacy value. It is not used anymore */
1008 info->ram->skipped = 0;
1009 info->ram->normal = ram_counters.normal;
1010 info->ram->normal_bytes = ram_counters.normal * page_size;
1011 info->ram->mbps = s->mbps;
1012 info->ram->dirty_sync_count = ram_counters.dirty_sync_count;
1013 info->ram->postcopy_requests = ram_counters.postcopy_requests;
1014 info->ram->page_size = page_size;
1015 info->ram->multifd_bytes = ram_counters.multifd_bytes;
1016 info->ram->pages_per_second = s->pages_per_second;
1017 info->ram->precopy_bytes = ram_counters.precopy_bytes;
1018 info->ram->downtime_bytes = ram_counters.downtime_bytes;
1019 info->ram->postcopy_bytes = ram_counters.postcopy_bytes;
1021 if (migrate_use_xbzrle()) {
1022 info->has_xbzrle_cache = true;
1023 info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
1024 info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
1025 info->xbzrle_cache->bytes = xbzrle_counters.bytes;
1026 info->xbzrle_cache->pages = xbzrle_counters.pages;
1027 info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
1028 info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
1029 info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate;
1030 info->xbzrle_cache->overflow = xbzrle_counters.overflow;
1033 if (migrate_use_compression()) {
1034 info->has_compression = true;
1035 info->compression = g_malloc0(sizeof(*info->compression));
1036 info->compression->pages = compression_counters.pages;
1037 info->compression->busy = compression_counters.busy;
1038 info->compression->busy_rate = compression_counters.busy_rate;
1039 info->compression->compressed_size =
1040 compression_counters.compressed_size;
1041 info->compression->compression_rate =
1042 compression_counters.compression_rate;
1045 if (cpu_throttle_active()) {
1046 info->has_cpu_throttle_percentage = true;
1047 info->cpu_throttle_percentage = cpu_throttle_get_percentage();
1050 if (s->state != MIGRATION_STATUS_COMPLETED) {
1051 info->ram->remaining = ram_bytes_remaining();
1052 info->ram->dirty_pages_rate = ram_counters.dirty_pages_rate;
1056 static void populate_disk_info(MigrationInfo *info)
1058 if (blk_mig_active()) {
1059 info->has_disk = true;
1060 info->disk = g_malloc0(sizeof(*info->disk));
1061 info->disk->transferred = blk_mig_bytes_transferred();
1062 info->disk->remaining = blk_mig_bytes_remaining();
1063 info->disk->total = blk_mig_bytes_total();
1067 static void fill_source_migration_info(MigrationInfo *info)
1069 MigrationState *s = migrate_get_current();
1070 GSList *cur_blocker = migration_blockers;
1072 info->blocked_reasons = NULL;
1075 * There are two types of reasons a migration might be blocked;
1076 * a) devices marked in VMState as non-migratable, and
1077 * b) Explicit migration blockers
1078 * We need to add both of them here.
1080 qemu_savevm_non_migratable_list(&info->blocked_reasons);
1082 while (cur_blocker) {
1083 QAPI_LIST_PREPEND(info->blocked_reasons,
1084 g_strdup(error_get_pretty(cur_blocker->data)));
1085 cur_blocker = g_slist_next(cur_blocker);
1087 info->has_blocked_reasons = info->blocked_reasons != NULL;
1089 switch (s->state) {
1090 case MIGRATION_STATUS_NONE:
1091 /* no migration has happened ever */
1092 /* do not overwrite destination migration status */
1093 return;
1094 case MIGRATION_STATUS_SETUP:
1095 info->has_status = true;
1096 info->has_total_time = false;
1097 break;
1098 case MIGRATION_STATUS_ACTIVE:
1099 case MIGRATION_STATUS_CANCELLING:
1100 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1101 case MIGRATION_STATUS_PRE_SWITCHOVER:
1102 case MIGRATION_STATUS_DEVICE:
1103 case MIGRATION_STATUS_POSTCOPY_PAUSED:
1104 case MIGRATION_STATUS_POSTCOPY_RECOVER:
1105 /* TODO add some postcopy stats */
1106 populate_time_info(info, s);
1107 populate_ram_info(info, s);
1108 populate_disk_info(info);
1109 populate_vfio_info(info);
1110 break;
1111 case MIGRATION_STATUS_COLO:
1112 info->has_status = true;
1113 /* TODO: display COLO specific information (checkpoint info etc.) */
1114 break;
1115 case MIGRATION_STATUS_COMPLETED:
1116 populate_time_info(info, s);
1117 populate_ram_info(info, s);
1118 populate_vfio_info(info);
1119 break;
1120 case MIGRATION_STATUS_FAILED:
1121 info->has_status = true;
1122 if (s->error) {
1123 info->has_error_desc = true;
1124 info->error_desc = g_strdup(error_get_pretty(s->error));
1126 break;
1127 case MIGRATION_STATUS_CANCELLED:
1128 info->has_status = true;
1129 break;
1130 case MIGRATION_STATUS_WAIT_UNPLUG:
1131 info->has_status = true;
1132 break;
1134 info->status = s->state;
1137 typedef enum WriteTrackingSupport {
1138 WT_SUPPORT_UNKNOWN = 0,
1139 WT_SUPPORT_ABSENT,
1140 WT_SUPPORT_AVAILABLE,
1141 WT_SUPPORT_COMPATIBLE
1142 } WriteTrackingSupport;
1144 static
1145 WriteTrackingSupport migrate_query_write_tracking(void)
1147 /* Check if kernel supports required UFFD features */
1148 if (!ram_write_tracking_available()) {
1149 return WT_SUPPORT_ABSENT;
1152 * Check if current memory configuration is
1153 * compatible with required UFFD features.
1155 if (!ram_write_tracking_compatible()) {
1156 return WT_SUPPORT_AVAILABLE;
1159 return WT_SUPPORT_COMPATIBLE;
1163 * @migration_caps_check - check capability validity
1165 * @cap_list: old capability list, array of bool
1166 * @params: new capabilities to be applied soon
1167 * @errp: set *errp if the check failed, with reason
1169 * Returns true if check passed, otherwise false.
1171 static bool migrate_caps_check(bool *cap_list,
1172 MigrationCapabilityStatusList *params,
1173 Error **errp)
1175 MigrationCapabilityStatusList *cap;
1176 bool old_postcopy_cap;
1177 MigrationIncomingState *mis = migration_incoming_get_current();
1179 old_postcopy_cap = cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM];
1181 for (cap = params; cap; cap = cap->next) {
1182 cap_list[cap->value->capability] = cap->value->state;
1185 #ifndef CONFIG_LIVE_BLOCK_MIGRATION
1186 if (cap_list[MIGRATION_CAPABILITY_BLOCK]) {
1187 error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
1188 "block migration");
1189 error_append_hint(errp, "Use drive_mirror+NBD instead.\n");
1190 return false;
1192 #endif
1194 #ifndef CONFIG_REPLICATION
1195 if (cap_list[MIGRATION_CAPABILITY_X_COLO]) {
1196 error_setg(errp, "QEMU compiled without replication module"
1197 " can't enable COLO");
1198 error_append_hint(errp, "Please enable replication before COLO.\n");
1199 return false;
1201 #endif
1203 if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
 1204         /* This check is reasonably expensive, so only do it when the
 1205          * capability is being set for the first time; also, it's only the
 1206          * destination that needs special support.
1208 if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) &&
1209 !postcopy_ram_supported_by_host(mis)) {
1210 /* postcopy_ram_supported_by_host will have emitted a more
1211 * detailed message
1213 error_setg(errp, "Postcopy is not supported");
1214 return false;
1217 if (cap_list[MIGRATION_CAPABILITY_X_IGNORE_SHARED]) {
1218 error_setg(errp, "Postcopy is not compatible with ignore-shared");
1219 return false;
1223 if (cap_list[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]) {
1224 WriteTrackingSupport wt_support;
1225 int idx;
1227 * Check if 'background-snapshot' capability is supported by
1228 * host kernel and compatible with guest memory configuration.
1230 wt_support = migrate_query_write_tracking();
1231 if (wt_support < WT_SUPPORT_AVAILABLE) {
1232 error_setg(errp, "Background-snapshot is not supported by host kernel");
1233 return false;
1235 if (wt_support < WT_SUPPORT_COMPATIBLE) {
1236 error_setg(errp, "Background-snapshot is not compatible "
1237 "with guest memory configuration");
1238 return false;
1242 * Check if there are any migration capabilities
1243 * incompatible with 'background-snapshot'.
1245 for (idx = 0; idx < check_caps_background_snapshot.size; idx++) {
1246 int incomp_cap = check_caps_background_snapshot.caps[idx];
1247 if (cap_list[incomp_cap]) {
1248 error_setg(errp,
1249 "Background-snapshot is not compatible with %s",
1250 MigrationCapability_str(incomp_cap));
1251 return false;
1256 /* incoming side only */
1257 if (runstate_check(RUN_STATE_INMIGRATE) &&
1258 !migrate_multifd_is_allowed() &&
1259 cap_list[MIGRATION_CAPABILITY_MULTIFD]) {
1260 error_setg(errp, "multifd is not supported by current protocol");
1261 return false;
1264 return true;
1267 static void fill_destination_migration_info(MigrationInfo *info)
1269 MigrationIncomingState *mis = migration_incoming_get_current();
1271 if (mis->socket_address_list) {
1272 info->has_socket_address = true;
1273 info->socket_address =
1274 QAPI_CLONE(SocketAddressList, mis->socket_address_list);
1277 switch (mis->state) {
1278 case MIGRATION_STATUS_NONE:
1279 return;
1280 case MIGRATION_STATUS_SETUP:
1281 case MIGRATION_STATUS_CANCELLING:
1282 case MIGRATION_STATUS_CANCELLED:
1283 case MIGRATION_STATUS_ACTIVE:
1284 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1285 case MIGRATION_STATUS_POSTCOPY_PAUSED:
1286 case MIGRATION_STATUS_POSTCOPY_RECOVER:
1287 case MIGRATION_STATUS_FAILED:
1288 case MIGRATION_STATUS_COLO:
1289 info->has_status = true;
1290 break;
1291 case MIGRATION_STATUS_COMPLETED:
1292 info->has_status = true;
1293 fill_destination_postcopy_migration_info(info);
1294 break;
1296 info->status = mis->state;
1299 MigrationInfo *qmp_query_migrate(Error **errp)
1301 MigrationInfo *info = g_malloc0(sizeof(*info));
1303 fill_destination_migration_info(info);
1304 fill_source_migration_info(info);
1306 return info;
1309 void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
1310 Error **errp)
1312 MigrationState *s = migrate_get_current();
1313 MigrationCapabilityStatusList *cap;
1314 bool cap_list[MIGRATION_CAPABILITY__MAX];
1316 if (migration_is_running(s->state)) {
1317 error_setg(errp, QERR_MIGRATION_ACTIVE);
1318 return;
1321 memcpy(cap_list, s->enabled_capabilities, sizeof(cap_list));
1322 if (!migrate_caps_check(cap_list, params, errp)) {
1323 return;
1326 for (cap = params; cap; cap = cap->next) {
1327 s->enabled_capabilities[cap->value->capability] = cap->value->state;
1332 * Check whether the parameters are valid. Error will be put into errp
1333 * (if provided). Return true if valid, otherwise false.
1335 static bool migrate_params_check(MigrationParameters *params, Error **errp)
1337 if (params->has_compress_level &&
1338 (params->compress_level > 9)) {
1339 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
1340 "a value between 0 and 9");
1341 return false;
1344 if (params->has_compress_threads && (params->compress_threads < 1)) {
1345 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1346 "compress_threads",
1347 "a value between 1 and 255");
1348 return false;
1351 if (params->has_decompress_threads && (params->decompress_threads < 1)) {
1352 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1353 "decompress_threads",
1354 "a value between 1 and 255");
1355 return false;
1358 if (params->has_throttle_trigger_threshold &&
1359 (params->throttle_trigger_threshold < 1 ||
1360 params->throttle_trigger_threshold > 100)) {
1361 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1362 "throttle_trigger_threshold",
1363 "an integer in the range of 1 to 100");
1364 return false;
1367 if (params->has_cpu_throttle_initial &&
1368 (params->cpu_throttle_initial < 1 ||
1369 params->cpu_throttle_initial > 99)) {
1370 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1371 "cpu_throttle_initial",
1372 "an integer in the range of 1 to 99");
1373 return false;
1376 if (params->has_cpu_throttle_increment &&
1377 (params->cpu_throttle_increment < 1 ||
1378 params->cpu_throttle_increment > 99)) {
1379 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1380 "cpu_throttle_increment",
1381 "an integer in the range of 1 to 99");
1382 return false;
1385 if (params->has_max_bandwidth && (params->max_bandwidth > SIZE_MAX)) {
1386 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1387 "max_bandwidth",
1388 "an integer in the range of 0 to "stringify(SIZE_MAX)
1389 " bytes/second");
1390 return false;
1393 if (params->has_downtime_limit &&
1394 (params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
1395 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1396 "downtime_limit",
1397 "an integer in the range of 0 to "
1398 stringify(MAX_MIGRATE_DOWNTIME)" ms");
1399 return false;
1402 /* x_checkpoint_delay is now always positive */
1404 if (params->has_multifd_channels && (params->multifd_channels < 1)) {
1405 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1406 "multifd_channels",
1407 "a value between 1 and 255");
1408 return false;
1411 if (params->has_multifd_zlib_level &&
1412 (params->multifd_zlib_level > 9)) {
1413 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zlib_level",
1414 "a value between 0 and 9");
1415 return false;
1418 if (params->has_multifd_zstd_level &&
1419 (params->multifd_zstd_level > 20)) {
1420 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zstd_level",
1421 "a value between 0 and 20");
1422 return false;
1425 if (params->has_xbzrle_cache_size &&
1426 (params->xbzrle_cache_size < qemu_target_page_size() ||
1427 !is_power_of_2(params->xbzrle_cache_size))) {
1428 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1429 "xbzrle_cache_size",
1430 "a power of two no less than the target page size");
1431 return false;
1434 if (params->has_max_cpu_throttle &&
1435 (params->max_cpu_throttle < params->cpu_throttle_initial ||
1436 params->max_cpu_throttle > 99)) {
1437 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1438 "max_cpu_throttle",
1439 "an integer in the range of cpu_throttle_initial to 99");
1440 return false;
1443 if (params->has_announce_initial &&
1444 params->announce_initial > 100000) {
1445 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1446 "announce_initial",
1447 "a value between 0 and 100000");
1448 return false;
1450 if (params->has_announce_max &&
1451 params->announce_max > 100000) {
1452 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1453 "announce_max",
1454 "a value between 0 and 100000");
1455 return false;
1457 if (params->has_announce_rounds &&
1458 params->announce_rounds > 1000) {
1459 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1460 "announce_rounds",
1461 "a value between 0 and 1000");
1462 return false;
1464 if (params->has_announce_step &&
1465 (params->announce_step < 1 ||
1466 params->announce_step > 10000)) {
1467 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1468 "announce_step",
1469 "a value between 0 and 10000");
1470 return false;
1473 if (params->has_block_bitmap_mapping &&
1474 !check_dirty_bitmap_mig_alias_map(params->block_bitmap_mapping, errp)) {
1475 error_prepend(errp, "Invalid mapping given for block-bitmap-mapping: ");
1476 return false;
1479 return true;
1482 static void migrate_params_test_apply(MigrateSetParameters *params,
1483 MigrationParameters *dest)
1485 *dest = migrate_get_current()->parameters;
1487 /* TODO use QAPI_CLONE() instead of duplicating it inline */
1489 if (params->has_compress_level) {
1490 dest->compress_level = params->compress_level;
1493 if (params->has_compress_threads) {
1494 dest->compress_threads = params->compress_threads;
1497 if (params->has_compress_wait_thread) {
1498 dest->compress_wait_thread = params->compress_wait_thread;
1501 if (params->has_decompress_threads) {
1502 dest->decompress_threads = params->decompress_threads;
1505 if (params->has_throttle_trigger_threshold) {
1506 dest->throttle_trigger_threshold = params->throttle_trigger_threshold;
1509 if (params->has_cpu_throttle_initial) {
1510 dest->cpu_throttle_initial = params->cpu_throttle_initial;
1513 if (params->has_cpu_throttle_increment) {
1514 dest->cpu_throttle_increment = params->cpu_throttle_increment;
1517 if (params->has_cpu_throttle_tailslow) {
1518 dest->cpu_throttle_tailslow = params->cpu_throttle_tailslow;
1521 if (params->has_tls_creds) {
1522 assert(params->tls_creds->type == QTYPE_QSTRING);
1523 dest->tls_creds = params->tls_creds->u.s;
1526 if (params->has_tls_hostname) {
1527 assert(params->tls_hostname->type == QTYPE_QSTRING);
1528 dest->tls_hostname = params->tls_hostname->u.s;
1531 if (params->has_max_bandwidth) {
1532 dest->max_bandwidth = params->max_bandwidth;
1535 if (params->has_downtime_limit) {
1536 dest->downtime_limit = params->downtime_limit;
1539 if (params->has_x_checkpoint_delay) {
1540 dest->x_checkpoint_delay = params->x_checkpoint_delay;
1543 if (params->has_block_incremental) {
1544 dest->block_incremental = params->block_incremental;
1546 if (params->has_multifd_channels) {
1547 dest->multifd_channels = params->multifd_channels;
1549 if (params->has_multifd_compression) {
1550 dest->multifd_compression = params->multifd_compression;
1552 if (params->has_xbzrle_cache_size) {
1553 dest->xbzrle_cache_size = params->xbzrle_cache_size;
1555 if (params->has_max_postcopy_bandwidth) {
1556 dest->max_postcopy_bandwidth = params->max_postcopy_bandwidth;
1558 if (params->has_max_cpu_throttle) {
1559 dest->max_cpu_throttle = params->max_cpu_throttle;
1561 if (params->has_announce_initial) {
1562 dest->announce_initial = params->announce_initial;
1564 if (params->has_announce_max) {
1565 dest->announce_max = params->announce_max;
1567 if (params->has_announce_rounds) {
1568 dest->announce_rounds = params->announce_rounds;
1570 if (params->has_announce_step) {
1571 dest->announce_step = params->announce_step;
1574 if (params->has_block_bitmap_mapping) {
1575 dest->has_block_bitmap_mapping = true;
1576 dest->block_bitmap_mapping = params->block_bitmap_mapping;
1580 static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
1582 MigrationState *s = migrate_get_current();
1584 /* TODO use QAPI_CLONE() instead of duplicating it inline */
1586 if (params->has_compress_level) {
1587 s->parameters.compress_level = params->compress_level;
1590 if (params->has_compress_threads) {
1591 s->parameters.compress_threads = params->compress_threads;
1594 if (params->has_compress_wait_thread) {
1595 s->parameters.compress_wait_thread = params->compress_wait_thread;
1598 if (params->has_decompress_threads) {
1599 s->parameters.decompress_threads = params->decompress_threads;
1602 if (params->has_throttle_trigger_threshold) {
1603 s->parameters.throttle_trigger_threshold = params->throttle_trigger_threshold;
1606 if (params->has_cpu_throttle_initial) {
1607 s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
1610 if (params->has_cpu_throttle_increment) {
1611 s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
1614 if (params->has_cpu_throttle_tailslow) {
1615 s->parameters.cpu_throttle_tailslow = params->cpu_throttle_tailslow;
1618 if (params->has_tls_creds) {
1619 g_free(s->parameters.tls_creds);
1620 assert(params->tls_creds->type == QTYPE_QSTRING);
1621 s->parameters.tls_creds = g_strdup(params->tls_creds->u.s);
1624 if (params->has_tls_hostname) {
1625 g_free(s->parameters.tls_hostname);
1626 assert(params->tls_hostname->type == QTYPE_QSTRING);
1627 s->parameters.tls_hostname = g_strdup(params->tls_hostname->u.s);
1630 if (params->has_tls_authz) {
1631 g_free(s->parameters.tls_authz);
1632 assert(params->tls_authz->type == QTYPE_QSTRING);
1633 s->parameters.tls_authz = g_strdup(params->tls_authz->u.s);
1636 if (params->has_max_bandwidth) {
1637 s->parameters.max_bandwidth = params->max_bandwidth;
1638 if (s->to_dst_file && !migration_in_postcopy()) {
1639 qemu_file_set_rate_limit(s->to_dst_file,
1640 s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
1644 if (params->has_downtime_limit) {
1645 s->parameters.downtime_limit = params->downtime_limit;
1648 if (params->has_x_checkpoint_delay) {
1649 s->parameters.x_checkpoint_delay = params->x_checkpoint_delay;
1650 if (migration_in_colo_state()) {
1651 colo_checkpoint_notify(s);
1655 if (params->has_block_incremental) {
1656 s->parameters.block_incremental = params->block_incremental;
1658 if (params->has_multifd_channels) {
1659 s->parameters.multifd_channels = params->multifd_channels;
1661 if (params->has_multifd_compression) {
1662 s->parameters.multifd_compression = params->multifd_compression;
1664 if (params->has_xbzrle_cache_size) {
1665 s->parameters.xbzrle_cache_size = params->xbzrle_cache_size;
1666 xbzrle_cache_resize(params->xbzrle_cache_size, errp);
1668 if (params->has_max_postcopy_bandwidth) {
1669 s->parameters.max_postcopy_bandwidth = params->max_postcopy_bandwidth;
1670 if (s->to_dst_file && migration_in_postcopy()) {
1671 qemu_file_set_rate_limit(s->to_dst_file,
1672 s->parameters.max_postcopy_bandwidth / XFER_LIMIT_RATIO);
1675 if (params->has_max_cpu_throttle) {
1676 s->parameters.max_cpu_throttle = params->max_cpu_throttle;
1678 if (params->has_announce_initial) {
1679 s->parameters.announce_initial = params->announce_initial;
1681 if (params->has_announce_max) {
1682 s->parameters.announce_max = params->announce_max;
1684 if (params->has_announce_rounds) {
1685 s->parameters.announce_rounds = params->announce_rounds;
1687 if (params->has_announce_step) {
1688 s->parameters.announce_step = params->announce_step;
1691 if (params->has_block_bitmap_mapping) {
1692 qapi_free_BitmapMigrationNodeAliasList(
1693 s->parameters.block_bitmap_mapping);
1695 s->parameters.has_block_bitmap_mapping = true;
1696 s->parameters.block_bitmap_mapping =
1697 QAPI_CLONE(BitmapMigrationNodeAliasList,
1698 params->block_bitmap_mapping);
1702 void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp)
1704 MigrationParameters tmp;
1706 /* TODO Rewrite "" to null instead */
1707 if (params->has_tls_creds
1708 && params->tls_creds->type == QTYPE_QNULL) {
1709 qobject_unref(params->tls_creds->u.n);
1710 params->tls_creds->type = QTYPE_QSTRING;
1711 params->tls_creds->u.s = strdup("");
1713 /* TODO Rewrite "" to null instead */
1714 if (params->has_tls_hostname
1715 && params->tls_hostname->type == QTYPE_QNULL) {
1716 qobject_unref(params->tls_hostname->u.n);
1717 params->tls_hostname->type = QTYPE_QSTRING;
1718 params->tls_hostname->u.s = strdup("");
1721 migrate_params_test_apply(params, &tmp);
1723 if (!migrate_params_check(&tmp, errp)) {
1724 /* Invalid parameter */
1725 return;
1728 migrate_params_apply(params, errp);
1732 void qmp_migrate_start_postcopy(Error **errp)
1734 MigrationState *s = migrate_get_current();
1736 if (!migrate_postcopy()) {
1737 error_setg(errp, "Enable postcopy with migrate_set_capability before"
1738 " the start of migration");
1739 return;
1742 if (s->state == MIGRATION_STATUS_NONE) {
1743 error_setg(errp, "Postcopy must be started after migration has been"
1744 " started");
1745 return;
1748 * we don't error if migration has finished since that would be racy
1749 * with issuing this command.
1751 qatomic_set(&s->start_postcopy, true);
1754 /* shared migration helpers */
1756 void migrate_set_state(int *state, int old_state, int new_state)
1758 assert(new_state < MIGRATION_STATUS__MAX);
1759 if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
1760 trace_migrate_set_state(MigrationStatus_str(new_state));
1761 migrate_generate_event(new_state);
1765 static MigrationCapabilityStatus *migrate_cap_add(MigrationCapability index,
1766 bool state)
1768 MigrationCapabilityStatus *cap;
1770 cap = g_new0(MigrationCapabilityStatus, 1);
1771 cap->capability = index;
1772 cap->state = state;
1774 return cap;
1777 void migrate_set_block_enabled(bool value, Error **errp)
1779 MigrationCapabilityStatusList *cap = NULL;
1781 QAPI_LIST_PREPEND(cap, migrate_cap_add(MIGRATION_CAPABILITY_BLOCK, value));
1782 qmp_migrate_set_capabilities(cap, errp);
1783 qapi_free_MigrationCapabilityStatusList(cap);
1786 static void migrate_set_block_incremental(MigrationState *s, bool value)
1788 s->parameters.block_incremental = value;
1791 static void block_cleanup_parameters(MigrationState *s)
1793 if (s->must_remove_block_options) {
1794 /* setting to false can never fail */
1795 migrate_set_block_enabled(false, &error_abort);
1796 migrate_set_block_incremental(s, false);
1797 s->must_remove_block_options = false;
1801 static void migrate_fd_cleanup(MigrationState *s)
1803 qemu_bh_delete(s->cleanup_bh);
1804 s->cleanup_bh = NULL;
1806 qemu_savevm_state_cleanup();
1808 if (s->to_dst_file) {
1809 QEMUFile *tmp;
1811 trace_migrate_fd_cleanup();
1812 qemu_mutex_unlock_iothread();
1813 if (s->migration_thread_running) {
1814 qemu_thread_join(&s->thread);
1815 s->migration_thread_running = false;
1817 qemu_mutex_lock_iothread();
1819 multifd_save_cleanup();
1820 qemu_mutex_lock(&s->qemu_file_lock);
1821 tmp = s->to_dst_file;
1822 s->to_dst_file = NULL;
1823 qemu_mutex_unlock(&s->qemu_file_lock);
1825 * Close the file handle without the lock to make sure the
1826 * critical section won't block for long.
1828 migration_ioc_unregister_yank_from_file(tmp);
1829 qemu_fclose(tmp);
1832 assert(!migration_is_active(s));
1834 if (s->state == MIGRATION_STATUS_CANCELLING) {
1835 migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
1836 MIGRATION_STATUS_CANCELLED);
1839 if (s->error) {
1840 /* It is used on info migrate. We can't free it */
1841 error_report_err(error_copy(s->error));
1843 notifier_list_notify(&migration_state_notifiers, s);
1844 block_cleanup_parameters(s);
1845 yank_unregister_instance(MIGRATION_YANK_INSTANCE);
1848 static void migrate_fd_cleanup_schedule(MigrationState *s)
1851 * Ref the state for bh, because it may be called when
 1852      * there are already no other refs
1854 object_ref(OBJECT(s));
1855 qemu_bh_schedule(s->cleanup_bh);
1858 static void migrate_fd_cleanup_bh(void *opaque)
1860 MigrationState *s = opaque;
1861 migrate_fd_cleanup(s);
1862 object_unref(OBJECT(s));
1865 void migrate_set_error(MigrationState *s, const Error *error)
1867 QEMU_LOCK_GUARD(&s->error_mutex);
1868 if (!s->error) {
1869 s->error = error_copy(error);
1873 static void migrate_error_free(MigrationState *s)
1875 QEMU_LOCK_GUARD(&s->error_mutex);
1876 if (s->error) {
1877 error_free(s->error);
1878 s->error = NULL;
1882 void migrate_fd_error(MigrationState *s, const Error *error)
1884 trace_migrate_fd_error(error_get_pretty(error));
1885 assert(s->to_dst_file == NULL);
1886 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1887 MIGRATION_STATUS_FAILED);
1888 migrate_set_error(s, error);
1891 static void migrate_fd_cancel(MigrationState *s)
1893 int old_state;
1894 QEMUFile *f = migrate_get_current()->to_dst_file;
1895 trace_migrate_fd_cancel();
1897 WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
1898 if (s->rp_state.from_dst_file) {
1899 /* shut down the rp socket, causing the rp thread to shut down */
1900 qemu_file_shutdown(s->rp_state.from_dst_file);
1904 do {
1905 old_state = s->state;
1906 if (!migration_is_running(old_state)) {
1907 break;
1909 /* If the migration is paused, kick it out of the pause */
1910 if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
1911 qemu_sem_post(&s->pause_sem);
1913 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
1914 } while (s->state != MIGRATION_STATUS_CANCELLING);
1917 * If we're unlucky the migration code might be stuck somewhere in a
1918 * send/write while the network has failed and is waiting to timeout;
1919 * if we've got shutdown(2) available then we can force it to quit.
1920 * The outgoing qemu file gets closed in migrate_fd_cleanup that is
1921 * called in a bh, so there is no race against this cancel.
1923 if (s->state == MIGRATION_STATUS_CANCELLING && f) {
1924 qemu_file_shutdown(f);
1926 if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
1927 Error *local_err = NULL;
1929 bdrv_invalidate_cache_all(&local_err);
1930 if (local_err) {
1931 error_report_err(local_err);
1932 } else {
1933 s->block_inactive = false;
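/*
 * Illustrative sketch (not from the original sources): the cancel flow
 * implied by the code above and by migrate_fd_cleanup().
 *
 *   <running state> --qmp_migrate_cancel--> MIGRATION_STATUS_CANCELLING
 *       (migration thread notices, unwinds, schedules the cleanup bh)
 *   MIGRATION_STATUS_CANCELLING --migrate_fd_cleanup--> MIGRATION_STATUS_CANCELLED
 */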
1938 void add_migration_state_change_notifier(Notifier *notify)
1940 notifier_list_add(&migration_state_notifiers, notify);
1943 void remove_migration_state_change_notifier(Notifier *notify)
1945 notifier_remove(notify);
1948 bool migration_in_setup(MigrationState *s)
1950 return s->state == MIGRATION_STATUS_SETUP;
1953 bool migration_has_finished(MigrationState *s)
1955 return s->state == MIGRATION_STATUS_COMPLETED;
1958 bool migration_has_failed(MigrationState *s)
1960 return (s->state == MIGRATION_STATUS_CANCELLED ||
1961 s->state == MIGRATION_STATUS_FAILED);
1964 bool migration_in_postcopy(void)
1966 MigrationState *s = migrate_get_current();
1968 switch (s->state) {
1969 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1970 case MIGRATION_STATUS_POSTCOPY_PAUSED:
1971 case MIGRATION_STATUS_POSTCOPY_RECOVER:
1972 return true;
1973 default:
1974 return false;
1978 bool migration_in_postcopy_after_devices(MigrationState *s)
1980 return migration_in_postcopy() && s->postcopy_after_devices;
1983 bool migration_in_incoming_postcopy(void)
1985 PostcopyState ps = postcopy_state_get();
1987 return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END;
1990 bool migration_in_bg_snapshot(void)
1992 MigrationState *s = migrate_get_current();
1994 return migrate_background_snapshot() &&
1995 migration_is_setup_or_active(s->state);
1998 bool migration_is_idle(void)
2000 MigrationState *s = current_migration;
2002 if (!s) {
2003 return true;
2006 switch (s->state) {
2007 case MIGRATION_STATUS_NONE:
2008 case MIGRATION_STATUS_CANCELLED:
2009 case MIGRATION_STATUS_COMPLETED:
2010 case MIGRATION_STATUS_FAILED:
2011 return true;
2012 case MIGRATION_STATUS_SETUP:
2013 case MIGRATION_STATUS_CANCELLING:
2014 case MIGRATION_STATUS_ACTIVE:
2015 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
2016 case MIGRATION_STATUS_COLO:
2017 case MIGRATION_STATUS_PRE_SWITCHOVER:
2018 case MIGRATION_STATUS_DEVICE:
2019 case MIGRATION_STATUS_WAIT_UNPLUG:
2020 return false;
2021 case MIGRATION_STATUS__MAX:
2022 g_assert_not_reached();
2025 return false;
2028 bool migration_is_active(MigrationState *s)
2030 return (s->state == MIGRATION_STATUS_ACTIVE ||
2031 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
2034 void migrate_init(MigrationState *s)
2037 * Reinitialise all migration state, except
2038 * parameters/capabilities that the user set, and
2039 * locks.
2041 s->cleanup_bh = 0;
2042 s->vm_start_bh = 0;
2043 s->to_dst_file = NULL;
2044 s->state = MIGRATION_STATUS_NONE;
2045 s->rp_state.from_dst_file = NULL;
2046 s->rp_state.error = false;
2047 s->mbps = 0.0;
2048 s->pages_per_second = 0.0;
2049 s->downtime = 0;
2050 s->expected_downtime = 0;
2051 s->setup_time = 0;
2052 s->start_postcopy = false;
2053 s->postcopy_after_devices = false;
2054 s->migration_thread_running = false;
2055 error_free(s->error);
2056 s->error = NULL;
2057 s->hostname = NULL;
2059 migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
2061 s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2062 s->total_time = 0;
2063 s->vm_was_running = false;
2064 s->iteration_initial_bytes = 0;
2065 s->threshold_size = 0;
2068 int migrate_add_blocker_internal(Error *reason, Error **errp)
2070 /* Snapshots are similar to migrations, so check RUN_STATE_SAVE_VM too. */
2071 if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) {
2072 error_propagate_prepend(errp, error_copy(reason),
2073 "disallowing migration blocker "
2074 "(migration/snapshot in progress) for: ");
2075 return -EBUSY;
2078 migration_blockers = g_slist_prepend(migration_blockers, reason);
2079 return 0;
2082 int migrate_add_blocker(Error *reason, Error **errp)
2084 if (only_migratable) {
2085 error_propagate_prepend(errp, error_copy(reason),
2086 "disallowing migration blocker "
2087 "(--only-migratable) for: ");
2088 return -EACCES;
2091 return migrate_add_blocker_internal(reason, errp);
2094 void migrate_del_blocker(Error *reason)
2096 migration_blockers = g_slist_remove(migration_blockers, reason);
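/*
 * Illustrative sketch (not from the original sources): the typical caller
 * pattern for the blocker API above, with a hypothetical device name.
 *
 *   Error *blocker = NULL;
 *
 *   error_setg(&blocker, "Device 'foo' does not support migration");
 *   if (migrate_add_blocker(blocker, errp) < 0) {
 *       // Rejected, e.g. because of --only-migratable or an active migration
 *       error_free(blocker);
 *       return;
 *   }
 *   ...
 *   migrate_del_blocker(blocker);   // when the blocking condition goes away
 *   error_free(blocker);
 */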
2099 void qmp_migrate_incoming(const char *uri, Error **errp)
2101 Error *local_err = NULL;
2102 static bool once = true;
2104 if (!once) {
2105 error_setg(errp, "The incoming migration has already been started");
2106 return;
2108 if (!runstate_check(RUN_STATE_INMIGRATE)) {
2109 error_setg(errp, "'-incoming' was not specified on the command line");
2110 return;
2113 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
2114 return;
2117 qemu_start_incoming_migration(uri, &local_err);
2119 if (local_err) {
2120 yank_unregister_instance(MIGRATION_YANK_INSTANCE);
2121 error_propagate(errp, local_err);
2122 return;
2125 once = false;
2128 void qmp_migrate_recover(const char *uri, Error **errp)
2130 MigrationIncomingState *mis = migration_incoming_get_current();
2133 * Don't even bother to use ERRP_GUARD() as errp _must_ always be set by
2134 * callers (no one should ignore a recover failure); if it isn't, it's a
2135 * programming error.
2137 assert(errp);
2139 if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
2140 error_setg(errp, "Migrate recover can only be run "
2141 "when postcopy is paused.");
2142 return;
2145 if (qatomic_cmpxchg(&mis->postcopy_recover_triggered,
2146 false, true) == true) {
2147 error_setg(errp, "Migrate recovery is triggered already");
2148 return;
2152 * Note that this call will never start a real migration; it will
2153 * only re-set up the migration stream and poke the existing migration
2154 * to continue using that newly established channel.
2156 qemu_start_incoming_migration(uri, errp);
2158 /* Safe to dereference with the assert above */
2159 if (*errp) {
2160 /* Reset the flag so the user can still retry */
2161 qatomic_set(&mis->postcopy_recover_triggered, false);
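/*
 * Illustrative sketch (not from the original sources): a typical postcopy
 * recovery sequence, assuming the standard QMP command names backing
 * qmp_migrate_recover() and qmp_migrate().
 *
 *   destination (postcopy-paused):
 *     { "execute": "migrate-recover",
 *       "arguments": { "uri": "tcp:0:4444" } }
 *
 *   source (postcopy-paused):
 *     { "execute": "migrate",
 *       "arguments": { "uri": "tcp:DST_HOST:4444", "resume": true } }
 *
 * Neither command starts a new migration; they re-establish the channel and
 * poke the paused migration to continue (see also migrate_fd_connect()).
 */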
2165 void qmp_migrate_pause(Error **errp)
2167 MigrationState *ms = migrate_get_current();
2168 MigrationIncomingState *mis = migration_incoming_get_current();
2169 int ret;
2171 if (ms->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2172 /* Source side, during postcopy */
2173 qemu_mutex_lock(&ms->qemu_file_lock);
2174 ret = qemu_file_shutdown(ms->to_dst_file);
2175 qemu_mutex_unlock(&ms->qemu_file_lock);
2176 if (ret) {
2177 error_setg(errp, "Failed to pause source migration");
2179 return;
2182 if (mis->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2183 ret = qemu_file_shutdown(mis->from_src_file);
2184 if (ret) {
2185 error_setg(errp, "Failed to pause destination migration");
2187 return;
2190 error_setg(errp, "migrate-pause is currently only supported "
2191 "during postcopy-active state");
2194 bool migration_is_blocked(Error **errp)
2196 if (qemu_savevm_state_blocked(errp)) {
2197 return true;
2200 if (migration_blockers) {
2201 error_propagate(errp, error_copy(migration_blockers->data));
2202 return true;
2205 return false;
2208 /* Returns true if continue to migrate, or false if error detected */
2209 static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
2210 bool resume, Error **errp)
2212 Error *local_err = NULL;
2214 if (resume) {
2215 if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
2216 error_setg(errp, "Cannot resume if there is no "
2217 "paused migration");
2218 return false;
2222 * Postcopy recovery won't work well with the release-ram
2223 * capability, since release-ram drops the page buffer as soon
2224 * as the page is put into the send buffer. So if a network
2225 * failure happens, any page buffers that have not yet reached
2226 * the destination VM but have already been sent from the
2227 * source VM are lost forever. Let's refuse to let the client
2228 * resume such a postcopy migration.
2229 * Luckily release-ram was designed to only be used when the
2230 * source and destination VMs are on the same host, so it
2231 * should be fine.
2233 if (migrate_release_ram()) {
2234 error_setg(errp, "Postcopy recovery cannot work "
2235 "when release-ram capability is set");
2236 return false;
2239 /* This is a resume, skip init status */
2240 return true;
2243 if (migration_is_running(s->state)) {
2244 error_setg(errp, QERR_MIGRATION_ACTIVE);
2245 return false;
2248 if (runstate_check(RUN_STATE_INMIGRATE)) {
2249 error_setg(errp, "Guest is waiting for an incoming migration");
2250 return false;
2253 if (runstate_check(RUN_STATE_POSTMIGRATE)) {
2254 error_setg(errp, "Can't migrate the vm that was paused due to "
2255 "previous migration");
2256 return false;
2259 if (migration_is_blocked(errp)) {
2260 return false;
2263 if (blk || blk_inc) {
2264 if (migrate_colo_enabled()) {
2265 error_setg(errp, "No disk migration is required in COLO mode");
2266 return false;
2268 if (migrate_use_block() || migrate_use_block_incremental()) {
2269 error_setg(errp, "Command options are incompatible with "
2270 "current migration capabilities");
2271 return false;
2273 migrate_set_block_enabled(true, &local_err);
2274 if (local_err) {
2275 error_propagate(errp, local_err);
2276 return false;
2278 s->must_remove_block_options = true;
2281 if (blk_inc) {
2282 migrate_set_block_incremental(s, true);
2285 migrate_init(s);
2287 * Zero the ram_counters and compression_counters for a
2288 * new migration
2290 memset(&ram_counters, 0, sizeof(ram_counters));
2291 memset(&compression_counters, 0, sizeof(compression_counters));
2293 return true;
2296 void qmp_migrate(const char *uri, bool has_blk, bool blk,
2297 bool has_inc, bool inc, bool has_detach, bool detach,
2298 bool has_resume, bool resume, Error **errp)
2300 Error *local_err = NULL;
2301 MigrationState *s = migrate_get_current();
2302 const char *p = NULL;
2304 if (!migrate_prepare(s, has_blk && blk, has_inc && inc,
2305 has_resume && resume, errp)) {
2306 /* Error detected, put into errp */
2307 return;
2310 if (!(has_resume && resume)) {
2311 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
2312 return;
2316 migrate_protocol_allow_multifd(false);
2317 if (strstart(uri, "tcp:", &p) ||
2318 strstart(uri, "unix:", NULL) ||
2319 strstart(uri, "vsock:", NULL)) {
2320 migrate_protocol_allow_multifd(true);
2321 socket_start_outgoing_migration(s, p ? p : uri, &local_err);
2322 #ifdef CONFIG_RDMA
2323 } else if (strstart(uri, "rdma:", &p)) {
2324 rdma_start_outgoing_migration(s, p, &local_err);
2325 #endif
2326 } else if (strstart(uri, "exec:", &p)) {
2327 exec_start_outgoing_migration(s, p, &local_err);
2328 } else if (strstart(uri, "fd:", &p)) {
2329 fd_start_outgoing_migration(s, p, &local_err);
2330 } else {
2331 if (!(has_resume && resume)) {
2332 yank_unregister_instance(MIGRATION_YANK_INSTANCE);
2334 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
2335 "a valid migration protocol");
2336 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
2337 MIGRATION_STATUS_FAILED);
2338 block_cleanup_parameters(s);
2339 return;
2342 if (local_err) {
2343 if (!(has_resume && resume)) {
2344 yank_unregister_instance(MIGRATION_YANK_INSTANCE);
2346 migrate_fd_error(s, local_err);
2347 error_propagate(errp, local_err);
2348 return;
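/*
 * Illustrative examples (not from the original sources) of URIs accepted by
 * the dispatch above, assuming the usual QEMU syntax:
 *
 *   tcp:<host>:<port>     e.g. "tcp:192.168.0.2:4444"    (multifd allowed)
 *   unix:<path>           e.g. "unix:/tmp/migrate.sock"  (multifd allowed)
 *   vsock:<cid>:<port>    e.g. "vsock:3:4444"            (multifd allowed)
 *   rdma:<host>:<port>    e.g. "rdma:192.168.0.2:4444"   (CONFIG_RDMA only)
 *   exec:<command>        e.g. "exec:cat > /tmp/vm.mig"
 *   fd:<fd name/number>   e.g. "fd:migfd"
 */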
2352 void qmp_migrate_cancel(Error **errp)
2354 migration_cancel(NULL);
2357 void qmp_migrate_continue(MigrationStatus state, Error **errp)
2359 MigrationState *s = migrate_get_current();
2360 if (s->state != state) {
2361 error_setg(errp, "Migration not in expected state: %s",
2362 MigrationStatus_str(s->state));
2363 return;
2365 qemu_sem_post(&s->pause_sem);
2368 bool migrate_release_ram(void)
2370 MigrationState *s;
2372 s = migrate_get_current();
2374 return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
2377 bool migrate_postcopy_ram(void)
2379 MigrationState *s;
2381 s = migrate_get_current();
2383 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
2386 bool migrate_postcopy(void)
2388 return migrate_postcopy_ram() || migrate_dirty_bitmaps();
2391 bool migrate_auto_converge(void)
2393 MigrationState *s;
2395 s = migrate_get_current();
2397 return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
2400 bool migrate_zero_blocks(void)
2402 MigrationState *s;
2404 s = migrate_get_current();
2406 return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
2409 bool migrate_postcopy_blocktime(void)
2411 MigrationState *s;
2413 s = migrate_get_current();
2415 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME];
2418 bool migrate_use_compression(void)
2420 MigrationState *s;
2422 s = migrate_get_current();
2424 return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
2427 int migrate_compress_level(void)
2429 MigrationState *s;
2431 s = migrate_get_current();
2433 return s->parameters.compress_level;
2436 int migrate_compress_threads(void)
2438 MigrationState *s;
2440 s = migrate_get_current();
2442 return s->parameters.compress_threads;
2445 int migrate_compress_wait_thread(void)
2447 MigrationState *s;
2449 s = migrate_get_current();
2451 return s->parameters.compress_wait_thread;
2454 int migrate_decompress_threads(void)
2456 MigrationState *s;
2458 s = migrate_get_current();
2460 return s->parameters.decompress_threads;
2463 bool migrate_dirty_bitmaps(void)
2465 MigrationState *s;
2467 s = migrate_get_current();
2469 return s->enabled_capabilities[MIGRATION_CAPABILITY_DIRTY_BITMAPS];
2472 bool migrate_ignore_shared(void)
2474 MigrationState *s;
2476 s = migrate_get_current();
2478 return s->enabled_capabilities[MIGRATION_CAPABILITY_X_IGNORE_SHARED];
2481 bool migrate_validate_uuid(void)
2483 MigrationState *s;
2485 s = migrate_get_current();
2487 return s->enabled_capabilities[MIGRATION_CAPABILITY_VALIDATE_UUID];
2490 bool migrate_use_events(void)
2492 MigrationState *s;
2494 s = migrate_get_current();
2496 return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
2499 bool migrate_use_multifd(void)
2501 MigrationState *s;
2503 s = migrate_get_current();
2505 return s->enabled_capabilities[MIGRATION_CAPABILITY_MULTIFD];
2508 bool migrate_pause_before_switchover(void)
2510 MigrationState *s;
2512 s = migrate_get_current();
2514 return s->enabled_capabilities[
2515 MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER];
2518 int migrate_multifd_channels(void)
2520 MigrationState *s;
2522 s = migrate_get_current();
2524 return s->parameters.multifd_channels;
2527 MultiFDCompression migrate_multifd_compression(void)
2529 MigrationState *s;
2531 s = migrate_get_current();
2533 return s->parameters.multifd_compression;
2536 int migrate_multifd_zlib_level(void)
2538 MigrationState *s;
2540 s = migrate_get_current();
2542 return s->parameters.multifd_zlib_level;
2545 int migrate_multifd_zstd_level(void)
2547 MigrationState *s;
2549 s = migrate_get_current();
2551 return s->parameters.multifd_zstd_level;
2554 int migrate_use_xbzrle(void)
2556 MigrationState *s;
2558 s = migrate_get_current();
2560 return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
2563 uint64_t migrate_xbzrle_cache_size(void)
2565 MigrationState *s;
2567 s = migrate_get_current();
2569 return s->parameters.xbzrle_cache_size;
2572 static int64_t migrate_max_postcopy_bandwidth(void)
2574 MigrationState *s;
2576 s = migrate_get_current();
2578 return s->parameters.max_postcopy_bandwidth;
2581 bool migrate_use_block(void)
2583 MigrationState *s;
2585 s = migrate_get_current();
2587 return s->enabled_capabilities[MIGRATION_CAPABILITY_BLOCK];
2590 bool migrate_use_return_path(void)
2592 MigrationState *s;
2594 s = migrate_get_current();
2596 return s->enabled_capabilities[MIGRATION_CAPABILITY_RETURN_PATH];
2599 bool migrate_use_block_incremental(void)
2601 MigrationState *s;
2603 s = migrate_get_current();
2605 return s->parameters.block_incremental;
2608 bool migrate_background_snapshot(void)
2610 MigrationState *s;
2612 s = migrate_get_current();
2614 return s->enabled_capabilities[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT];
2617 /* migration thread support */
2619 * Something bad happened to the RP stream; mark an error.
2620 * The caller shall print or trace something to indicate why
2622 static void mark_source_rp_bad(MigrationState *s)
2624 s->rp_state.error = true;
2627 static struct rp_cmd_args {
2628 ssize_t len; /* -1 = variable */
2629 const char *name;
2630 } rp_cmd_args[] = {
2631 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" },
2632 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" },
2633 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" },
2634 [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" },
2635 [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" },
2636 [MIG_RP_MSG_RECV_BITMAP] = { .len = -1, .name = "RECV_BITMAP" },
2637 [MIG_RP_MSG_RESUME_ACK] = { .len = 4, .name = "RESUME_ACK" },
2638 [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" },
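/*
 * Illustrative sketch (not from the original sources): the return-path wire
 * format as consumed by source_return_path_thread() below. Each message is
 *
 *   +-------------+-------------+---------------------+
 *   | type (be16) | len (be16)  | payload (len bytes) |
 *   +-------------+-------------+---------------------+
 *
 * e.g. a REQ_PAGES payload is 12 bytes: { start (be64), len (be32) }, and
 * REQ_PAGES_ID additionally carries a length-prefixed RAMBlock idstr.
 */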
2642 * Process a request for pages received on the return path.
2643 * We're allowed to send more than requested (e.g. to round to our page size)
2644 * and we don't need to send pages that have already been sent.
2646 static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
2647 ram_addr_t start, size_t len)
2649 long our_host_ps = qemu_real_host_page_size;
2651 trace_migrate_handle_rp_req_pages(rbname, start, len);
2654 * Since we currently insist on matching page sizes, just sanity check
2655 * we're being asked for whole host pages.
2657 if (!QEMU_IS_ALIGNED(start, our_host_ps) ||
2658 !QEMU_IS_ALIGNED(len, our_host_ps)) {
2659 error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
2660 " len: %zd", __func__, start, len);
2661 mark_source_rp_bad(ms);
2662 return;
2665 if (ram_save_queue_pages(rbname, start, len)) {
2666 mark_source_rp_bad(ms);
2670 /* Return true to retry, false to quit */
2671 static bool postcopy_pause_return_path_thread(MigrationState *s)
2673 trace_postcopy_pause_return_path();
2675 qemu_sem_wait(&s->postcopy_pause_rp_sem);
2677 trace_postcopy_pause_return_path_continued();
2679 return true;
2682 static int migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name)
2684 RAMBlock *block = qemu_ram_block_by_name(block_name);
2686 if (!block) {
2687 error_report("%s: invalid block name '%s'", __func__, block_name);
2688 return -EINVAL;
2691 /* Fetch the received bitmap and refresh the dirty bitmap */
2692 return ram_dirty_bitmap_reload(s, block);
2695 static int migrate_handle_rp_resume_ack(MigrationState *s, uint32_t value)
2697 trace_source_return_path_thread_resume_ack(value);
2699 if (value != MIGRATION_RESUME_ACK_VALUE) {
2700 error_report("%s: illegal resume_ack value %"PRIu32,
2701 __func__, value);
2702 return -1;
2705 /* Now both sides are active. */
2706 migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
2707 MIGRATION_STATUS_POSTCOPY_ACTIVE);
2709 /* Notify the send thread that it's time to continue sending pages */
2710 qemu_sem_post(&s->rp_state.rp_sem);
2712 return 0;
2715 /* Release ms->rp_state.from_dst_file in a safe way */
2716 static void migration_release_from_dst_file(MigrationState *ms)
2718 QEMUFile *file;
2720 WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
2722 * Clear the from_dst_file pointer before releasing the file, as we
2723 * can't block within the lock section
2725 file = ms->rp_state.from_dst_file;
2726 ms->rp_state.from_dst_file = NULL;
2729 qemu_fclose(file);
2733 * Handles messages sent on the return path towards the source VM
2736 static void *source_return_path_thread(void *opaque)
2738 MigrationState *ms = opaque;
2739 QEMUFile *rp = ms->rp_state.from_dst_file;
2740 uint16_t header_len, header_type;
2741 uint8_t buf[512];
2742 uint32_t tmp32, sibling_error;
2743 ram_addr_t start = 0; /* =0 to silence warning */
2744 size_t len = 0, expected_len;
2745 int res;
2747 trace_source_return_path_thread_entry();
2748 rcu_register_thread();
2750 retry:
2751 while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
2752 migration_is_setup_or_active(ms->state)) {
2753 trace_source_return_path_thread_loop_top();
2754 header_type = qemu_get_be16(rp);
2755 header_len = qemu_get_be16(rp);
2757 if (qemu_file_get_error(rp)) {
2758 mark_source_rp_bad(ms);
2759 goto out;
2762 if (header_type >= MIG_RP_MSG_MAX ||
2763 header_type == MIG_RP_MSG_INVALID) {
2764 error_report("RP: Received invalid message 0x%04x length 0x%04x",
2765 header_type, header_len);
2766 mark_source_rp_bad(ms);
2767 goto out;
2770 if ((rp_cmd_args[header_type].len != -1 &&
2771 header_len != rp_cmd_args[header_type].len) ||
2772 header_len > sizeof(buf)) {
2773 error_report("RP: Received '%s' message (0x%04x) with"
2774 "incorrect length %d expecting %zu",
2775 rp_cmd_args[header_type].name, header_type, header_len,
2776 (size_t)rp_cmd_args[header_type].len);
2777 mark_source_rp_bad(ms);
2778 goto out;
2781 /* We know we've got a valid header by this point */
2782 res = qemu_get_buffer(rp, buf, header_len);
2783 if (res != header_len) {
2784 error_report("RP: Failed reading data for message 0x%04x"
2785 " read %d expected %d",
2786 header_type, res, header_len);
2787 mark_source_rp_bad(ms);
2788 goto out;
2791 /* OK, we have the message and the data */
2792 switch (header_type) {
2793 case MIG_RP_MSG_SHUT:
2794 sibling_error = ldl_be_p(buf);
2795 trace_source_return_path_thread_shut(sibling_error);
2796 if (sibling_error) {
2797 error_report("RP: Sibling indicated error %d", sibling_error);
2798 mark_source_rp_bad(ms);
2801 * We'll let the main thread deal with closing the RP;
2802 * we could do a shutdown(2) on it, but we're the only user
2803 * anyway, so there's nothing gained.
2805 goto out;
2807 case MIG_RP_MSG_PONG:
2808 tmp32 = ldl_be_p(buf);
2809 trace_source_return_path_thread_pong(tmp32);
2810 break;
2812 case MIG_RP_MSG_REQ_PAGES:
2813 start = ldq_be_p(buf);
2814 len = ldl_be_p(buf + 8);
2815 migrate_handle_rp_req_pages(ms, NULL, start, len);
2816 break;
2818 case MIG_RP_MSG_REQ_PAGES_ID:
2819 expected_len = 12 + 1; /* header + termination */
2821 if (header_len >= expected_len) {
2822 start = ldq_be_p(buf);
2823 len = ldl_be_p(buf + 8);
2824 /* Now we expect an idstr */
2825 tmp32 = buf[12]; /* Length of the following idstr */
2826 buf[13 + tmp32] = '\0';
2827 expected_len += tmp32;
2829 if (header_len != expected_len) {
2830 error_report("RP: Req_Page_id with length %d expecting %zd",
2831 header_len, expected_len);
2832 mark_source_rp_bad(ms);
2833 goto out;
2835 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
2836 break;
2838 case MIG_RP_MSG_RECV_BITMAP:
2839 if (header_len < 1) {
2840 error_report("%s: missing block name", __func__);
2841 mark_source_rp_bad(ms);
2842 goto out;
2844 /* Format: len (1B) + idstr (<255B). This ends the idstr. */
2845 buf[buf[0] + 1] = '\0';
2846 if (migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1))) {
2847 mark_source_rp_bad(ms);
2848 goto out;
2850 break;
2852 case MIG_RP_MSG_RESUME_ACK:
2853 tmp32 = ldl_be_p(buf);
2854 if (migrate_handle_rp_resume_ack(ms, tmp32)) {
2855 mark_source_rp_bad(ms);
2856 goto out;
2858 break;
2860 default:
2861 break;
2865 out:
2866 res = qemu_file_get_error(rp);
2867 if (res) {
2868 if (res == -EIO && migration_in_postcopy()) {
2870 * Maybe there is something we can do: it looks like a
2871 * network down issue, and we pause for a recovery.
2873 migration_release_from_dst_file(ms);
2874 rp = NULL;
2875 if (postcopy_pause_return_path_thread(ms)) {
2877 * Reload rp, reset the rest. Referencing it is safe since
2878 * it's reset only by us above, or when migration completes
2880 rp = ms->rp_state.from_dst_file;
2881 ms->rp_state.error = false;
2882 goto retry;
2886 trace_source_return_path_thread_bad_end();
2887 mark_source_rp_bad(ms);
2890 trace_source_return_path_thread_end();
2891 migration_release_from_dst_file(ms);
2892 rcu_unregister_thread();
2893 return NULL;
2896 static int open_return_path_on_source(MigrationState *ms,
2897 bool create_thread)
2899 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
2900 if (!ms->rp_state.from_dst_file) {
2901 return -1;
2904 trace_open_return_path_on_source();
2906 if (!create_thread) {
2907 /* We're done */
2908 return 0;
2911 qemu_thread_create(&ms->rp_state.rp_thread, "return path",
2912 source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
2913 ms->rp_state.rp_thread_created = true;
2915 trace_open_return_path_on_source_continue();
2917 return 0;
2920 /* Returns 0 if the RP was ok, otherwise there was an error on the RP */
2921 static int await_return_path_close_on_source(MigrationState *ms)
2924 * If this is a normal exit then the destination will send a SHUT and the
2925 * rp_thread will exit; however, if there's an error we need to cause
2926 * it to exit.
2928 if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
2930 * shutdown(2), if we have it, will cause it to unblock if it's stuck
2931 * waiting for the destination.
2933 qemu_file_shutdown(ms->rp_state.from_dst_file);
2934 mark_source_rp_bad(ms);
2936 trace_await_return_path_close_on_source_joining();
2937 qemu_thread_join(&ms->rp_state.rp_thread);
2938 ms->rp_state.rp_thread_created = false;
2939 trace_await_return_path_close_on_source_close();
2940 return ms->rp_state.error;
2944 * Switch from normal iteration to postcopy
2945 * Returns non-0 on error
2947 static int postcopy_start(MigrationState *ms)
2949 int ret;
2950 QIOChannelBuffer *bioc;
2951 QEMUFile *fb;
2952 int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2953 int64_t bandwidth = migrate_max_postcopy_bandwidth();
2954 bool restart_block = false;
2955 int cur_state = MIGRATION_STATUS_ACTIVE;
2956 if (!migrate_pause_before_switchover()) {
2957 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
2958 MIGRATION_STATUS_POSTCOPY_ACTIVE);
2961 trace_postcopy_start();
2962 qemu_mutex_lock_iothread();
2963 trace_postcopy_start_set_run();
2965 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
2966 global_state_store();
2967 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
2968 if (ret < 0) {
2969 goto fail;
2972 ret = migration_maybe_pause(ms, &cur_state,
2973 MIGRATION_STATUS_POSTCOPY_ACTIVE);
2974 if (ret < 0) {
2975 goto fail;
2978 ret = bdrv_inactivate_all();
2979 if (ret < 0) {
2980 goto fail;
2982 restart_block = true;
2985 * Cause any non-postcopiable, but iterative devices to
2986 * send out their final data.
2988 qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);
2991 * In the 'finish migrate' state, with the io-lock held, everything should
2992 * be quiet; but we've potentially still got dirty pages, and we need to
2993 * tell the destination to throw away any pages it has already received
2994 * that are now dirty.
2996 if (migrate_postcopy_ram()) {
2997 ram_postcopy_send_discard_bitmap(ms);
3001 * Send the rest of the state - note that devices doing postcopy
3002 * will notice we're in POSTCOPY_ACTIVE and will not actually
3003 * wrap their state up here.
3005 /* 0 max-postcopy-bandwidth means unlimited */
3006 if (!bandwidth) {
3007 qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
3008 } else {
3009 qemu_file_set_rate_limit(ms->to_dst_file, bandwidth / XFER_LIMIT_RATIO);
3011 if (migrate_postcopy_ram()) {
3012 /* Ping just for debugging, helps line traces up */
3013 qemu_savevm_send_ping(ms->to_dst_file, 2);
3017 * While loading the device state we may trigger page transfer
3018 * requests and the fd must be free to process those, and thus
3019 * the destination must read the whole device state off the fd before
3020 * it starts processing it. Unfortunately the ad-hoc migration format
3021 * doesn't allow the destination to know the size to read without fully
3022 * parsing it through each device's load-state code (especially the
3023 * open-coded devices that use get/put).
3024 * So we wrap the device state up in a package with a length at the start;
3025 * to do this we use a qemu_buf to hold the whole of the device state.
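/*
 * Roughly (a sketch mirroring the calls below, not a fixed on-the-wire
 * specification), the packaged blob ends up containing:
 *
 *   [ postcopy-listen command             ]
 *   [ remaining precopy device state      ]
 *   [ optional ping(3), postcopy-RAM only ]
 *   [ postcopy-run command                ]
 *
 * and is then shipped to the destination in one length-prefixed chunk via
 * qemu_savevm_send_packaged().
 */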
3027 bioc = qio_channel_buffer_new(4096);
3028 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
3029 fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc));
3030 object_unref(OBJECT(bioc));
3033 * Make sure the receiver can get incoming pages before we send the rest
3034 * of the state
3036 qemu_savevm_send_postcopy_listen(fb);
3038 qemu_savevm_state_complete_precopy(fb, false, false);
3039 if (migrate_postcopy_ram()) {
3040 qemu_savevm_send_ping(fb, 3);
3043 qemu_savevm_send_postcopy_run(fb);
3045 /* <><> end of stuff going into the package */
3047 /* Last point of recovery; as soon as we send the package the destination
3048 * can open devices and potentially start running.
3049 * Let's just check again that we've not got any errors.
3051 ret = qemu_file_get_error(ms->to_dst_file);
3052 if (ret) {
3053 error_report("postcopy_start: Migration stream errored (pre package)");
3054 goto fail_closefb;
3057 restart_block = false;
3059 /* Now send that blob */
3060 if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
3061 goto fail_closefb;
3063 qemu_fclose(fb);
3065 /* Send a notification to give a chance for anything that needs to happen
3066 * at the transition to postcopy and after the device state; in particular
3067 * spice needs to trigger a transition now
3069 ms->postcopy_after_devices = true;
3070 notifier_list_notify(&migration_state_notifiers, ms);
3072 ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;
3074 qemu_mutex_unlock_iothread();
3076 if (migrate_postcopy_ram()) {
3078 * Although this ping is just for debug, it could potentially be
3079 * used for getting a better measurement of downtime at the source.
3081 qemu_savevm_send_ping(ms->to_dst_file, 4);
3084 if (migrate_release_ram()) {
3085 ram_postcopy_migrated_memory_release(ms);
3088 ret = qemu_file_get_error(ms->to_dst_file);
3089 if (ret) {
3090 error_report("postcopy_start: Migration stream errored");
3091 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
3092 MIGRATION_STATUS_FAILED);
3095 return ret;
3097 fail_closefb:
3098 qemu_fclose(fb);
3099 fail:
3100 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
3101 MIGRATION_STATUS_FAILED);
3102 if (restart_block) {
3103 /* A failure happened early enough that we know the destination hasn't
3104 * accessed block devices, so we're safe to recover.
3106 Error *local_err = NULL;
3108 bdrv_invalidate_cache_all(&local_err);
3109 if (local_err) {
3110 error_report_err(local_err);
3113 qemu_mutex_unlock_iothread();
3114 return -1;
3118 * migration_maybe_pause: Pause if required by
3119 * migrate_pause_before_switchover(); called with the iothread locked.
3120 * Returns: 0 on success
3122 static int migration_maybe_pause(MigrationState *s,
3123 int *current_active_state,
3124 int new_state)
3126 if (!migrate_pause_before_switchover()) {
3127 return 0;
3130 /* Since leaving this state is not atomic with posting the semaphore
3131 * it's possible that someone could have issued multiple migrate_continue
3132 * and the semaphore is incorrectly positive at this point;
3133 * the docs say it's undefined to reinit a semaphore that's already
3134 * init'd, so use timedwait to eat up any existing posts.
3136 while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) {
3137 /* This block intentionally left blank */
3141 * If the migration is cancelled when it is in the completion phase,
3142 * the migration state is set to MIGRATION_STATUS_CANCELLING.
3143 * In that case we don't need to wait on the semaphore; otherwise we
3144 * would block forever on the 'pause_sem' semaphore.
3146 if (s->state != MIGRATION_STATUS_CANCELLING) {
3147 qemu_mutex_unlock_iothread();
3148 migrate_set_state(&s->state, *current_active_state,
3149 MIGRATION_STATUS_PRE_SWITCHOVER);
3150 qemu_sem_wait(&s->pause_sem);
3151 migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
3152 new_state);
3153 *current_active_state = new_state;
3154 qemu_mutex_lock_iothread();
3157 return s->state == new_state ? 0 : -EINVAL;
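/*
 * Illustrative sketch (not from the original sources): how a management
 * application typically drives the pause above, assuming the standard QMP
 * command names.
 *
 *   { "execute": "migrate-set-capabilities",
 *     "arguments": { "capabilities": [
 *       { "capability": "pause-before-switchover", "state": true } ] } }
 *   ... start the migration, wait for status "pre-switchover" ...
 *   { "execute": "migrate-continue",
 *     "arguments": { "state": "pre-switchover" } }
 *
 * qmp_migrate_continue() above posts pause_sem, which releases the wait here.
 */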
3161 * migration_completion: Used by migration_thread when there's not much left.
3162 * The caller 'breaks' the loop when this returns.
3164 * @s: Current migration state
3166 static void migration_completion(MigrationState *s)
3168 int ret;
3169 int current_active_state = s->state;
3171 if (s->state == MIGRATION_STATUS_ACTIVE) {
3172 qemu_mutex_lock_iothread();
3173 s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3174 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
3175 s->vm_was_running = runstate_is_running();
3176 ret = global_state_store();
3178 if (!ret) {
3179 bool inactivate = !migrate_colo_enabled();
3180 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
3181 trace_migration_completion_vm_stop(ret);
3182 if (ret >= 0) {
3183 ret = migration_maybe_pause(s, &current_active_state,
3184 MIGRATION_STATUS_DEVICE);
3186 if (ret >= 0) {
3187 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
3188 ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
3189 inactivate);
3191 if (inactivate && ret >= 0) {
3192 s->block_inactive = true;
3195 qemu_mutex_unlock_iothread();
3197 if (ret < 0) {
3198 goto fail;
3200 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
3201 trace_migration_completion_postcopy_end();
3203 qemu_mutex_lock_iothread();
3204 qemu_savevm_state_complete_postcopy(s->to_dst_file);
3205 qemu_mutex_unlock_iothread();
3207 trace_migration_completion_postcopy_end_after_complete();
3208 } else {
3209 goto fail;
3213 * If rp was opened we must clean up the thread before
3214 * cleaning everything else up (since if there are no failures
3215 * it will wait for the destination to send its status in
3216 * a SHUT command).
3218 if (s->rp_state.rp_thread_created) {
3219 int rp_error;
3220 trace_migration_return_path_end_before();
3221 rp_error = await_return_path_close_on_source(s);
3222 trace_migration_return_path_end_after(rp_error);
3223 if (rp_error) {
3224 goto fail_invalidate;
3228 if (qemu_file_get_error(s->to_dst_file)) {
3229 trace_migration_completion_file_err();
3230 goto fail_invalidate;
3233 if (migrate_colo_enabled() && s->state == MIGRATION_STATUS_ACTIVE) {
3234 /* COLO does not support postcopy */
3235 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
3236 MIGRATION_STATUS_COLO);
3237 } else {
3238 migrate_set_state(&s->state, current_active_state,
3239 MIGRATION_STATUS_COMPLETED);
3242 return;
3244 fail_invalidate:
3245 /* If not doing postcopy, vm_start() will be called: let's regain
3246 * control of the images.
3248 if (s->state == MIGRATION_STATUS_ACTIVE ||
3249 s->state == MIGRATION_STATUS_DEVICE) {
3250 Error *local_err = NULL;
3252 qemu_mutex_lock_iothread();
3253 bdrv_invalidate_cache_all(&local_err);
3254 if (local_err) {
3255 error_report_err(local_err);
3256 } else {
3257 s->block_inactive = false;
3259 qemu_mutex_unlock_iothread();
3262 fail:
3263 migrate_set_state(&s->state, current_active_state,
3264 MIGRATION_STATUS_FAILED);
3268 * bg_migration_completion: Used by bg_migration_thread after all the
3269 * RAM has been saved. The caller 'breaks' the loop when this returns.
3271 * @s: Current migration state
3273 static void bg_migration_completion(MigrationState *s)
3275 int current_active_state = s->state;
3278 * Stop tracking RAM writes - un-protect memory, un-register UFFD
3279 * memory ranges, flush kernel wait queues and wake up threads
3280 * waiting for write fault to be resolved.
3282 ram_write_tracking_stop();
3284 if (s->state == MIGRATION_STATUS_ACTIVE) {
3286 * By this moment we have RAM content saved into the migration stream.
3287 * The next step is to flush the non-RAM content (device state)
3288 * right after the ram content. The device state has been stored into
3289 * the temporary buffer before RAM saving started.
3291 qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage);
3292 qemu_fflush(s->to_dst_file);
3293 } else if (s->state == MIGRATION_STATUS_CANCELLING) {
3294 goto fail;
3297 if (qemu_file_get_error(s->to_dst_file)) {
3298 trace_migration_completion_file_err();
3299 goto fail;
3302 migrate_set_state(&s->state, current_active_state,
3303 MIGRATION_STATUS_COMPLETED);
3304 return;
3306 fail:
3307 migrate_set_state(&s->state, current_active_state,
3308 MIGRATION_STATUS_FAILED);
3311 bool migrate_colo_enabled(void)
3313 MigrationState *s = migrate_get_current();
3314 return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO];
3317 typedef enum MigThrError {
3318 /* No error detected */
3319 MIG_THR_ERR_NONE = 0,
3320 /* Detected error, but resumed successfully */
3321 MIG_THR_ERR_RECOVERED = 1,
3322 /* Detected fatal error, need to exit */
3323 MIG_THR_ERR_FATAL = 2,
3324 } MigThrError;
3326 static int postcopy_resume_handshake(MigrationState *s)
3328 qemu_savevm_send_postcopy_resume(s->to_dst_file);
3330 while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
3331 qemu_sem_wait(&s->rp_state.rp_sem);
3334 if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
3335 return 0;
3338 return -1;
3341 /* Return zero if success, or <0 for error */
3342 static int postcopy_do_resume(MigrationState *s)
3344 int ret;
3347 * Call all the resume_prepare() hooks, so that modules can be
3348 * ready for the migration resume.
3350 ret = qemu_savevm_state_resume_prepare(s);
3351 if (ret) {
3352 error_report("%s: resume_prepare() failure detected: %d",
3353 __func__, ret);
3354 return ret;
3358 * Last handshake with destination on the resume (destination will
3359 * switch to postcopy-active afterwards)
3361 ret = postcopy_resume_handshake(s);
3362 if (ret) {
3363 error_report("%s: handshake failed: %d", __func__, ret);
3364 return ret;
3367 return 0;
3371 * We don't return until we are in a safe state to continue the current
3372 * postcopy migration. Returns MIG_THR_ERR_RECOVERED if recovered, or
3373 * MIG_THR_ERR_FATAL if an unrecoverable failure happened.
3375 static MigThrError postcopy_pause(MigrationState *s)
3377 assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
3379 while (true) {
3380 QEMUFile *file;
3383 * Current channel is possibly broken. Release it. Note that this is
3384 * guaranteed even without lock because to_dst_file should only be
3385 * modified by the migration thread. That also guarantees that the
3386 * unregister of yank is safe too without the lock. It should be safe
3387 * even to be within the qemu_file_lock, but we didn't do that to avoid
3388 * taking another mutex (yank_lock) within qemu_file_lock. TL;DR: we make
3389 * the qemu_file_lock critical section as small as possible.
3391 assert(s->to_dst_file);
3392 migration_ioc_unregister_yank_from_file(s->to_dst_file);
3393 qemu_mutex_lock(&s->qemu_file_lock);
3394 file = s->to_dst_file;
3395 s->to_dst_file = NULL;
3396 qemu_mutex_unlock(&s->qemu_file_lock);
3398 qemu_file_shutdown(file);
3399 qemu_fclose(file);
3401 migrate_set_state(&s->state, s->state,
3402 MIGRATION_STATUS_POSTCOPY_PAUSED);
3404 error_report("Detected IO failure for postcopy. "
3405 "Migration paused.");
3408 * We wait until things are fixed up. Then someone will set the
3409 * status back for us.
3411 while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
3412 qemu_sem_wait(&s->postcopy_pause_sem);
3415 if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
3416 /* Woken up by a recover procedure. Give it a shot */
3419 * Firstly, let's wake up the return path now, with a new
3420 * return path channel.
3422 qemu_sem_post(&s->postcopy_pause_rp_sem);
3424 /* Do the resume logic */
3425 if (postcopy_do_resume(s) == 0) {
3426 /* Let's continue! */
3427 trace_postcopy_pause_continued();
3428 return MIG_THR_ERR_RECOVERED;
3429 } else {
3431 * Something went wrong during the recovery; let's
3432 * pause again. Pause is always better than throwing
3433 * data away.
3435 continue;
3437 } else {
3438 /* This is not right... Time to quit. */
3439 return MIG_THR_ERR_FATAL;
3444 static MigThrError migration_detect_error(MigrationState *s)
3446 int ret;
3447 int state = s->state;
3448 Error *local_error = NULL;
3450 if (state == MIGRATION_STATUS_CANCELLING ||
3451 state == MIGRATION_STATUS_CANCELLED) {
3452 /* End the migration, but don't set the state to failed */
3453 return MIG_THR_ERR_FATAL;
3456 /* Try to detect any file errors */
3457 ret = qemu_file_get_error_obj(s->to_dst_file, &local_error);
3458 if (!ret) {
3459 /* Everything is fine */
3460 assert(!local_error);
3461 return MIG_THR_ERR_NONE;
3464 if (local_error) {
3465 migrate_set_error(s, local_error);
3466 error_free(local_error);
3469 if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret == -EIO) {
3471 * For postcopy, we allow the network to be down for a
3472 * while. After that, it can be continued by a
3473 * recovery phase.
3475 return postcopy_pause(s);
3476 } else {
3478 * For precopy (or postcopy with an error outside IO), we fail
3479 * immediately.
3481 migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED);
3482 trace_migration_thread_file_err();
3484 /* Time to stop the migration, now. */
3485 return MIG_THR_ERR_FATAL;
3489 /* How many bytes have we transferred since the beginning of the migration */
3490 static uint64_t migration_total_bytes(MigrationState *s)
3492 return qemu_ftell(s->to_dst_file) + ram_counters.multifd_bytes;
3495 static void migration_calculate_complete(MigrationState *s)
3497 uint64_t bytes = migration_total_bytes(s);
3498 int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3499 int64_t transfer_time;
3501 s->total_time = end_time - s->start_time;
3502 if (!s->downtime) {
3504 * It's still not set, so this is a precopy migration. For
3505 * postcopy, downtime is calculated during postcopy_start().
3507 s->downtime = end_time - s->downtime_start;
3510 transfer_time = s->total_time - s->setup_time;
3511 if (transfer_time) {
3512 s->mbps = ((double) bytes * 8.0) / transfer_time / 1000;
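/*
 * Worked example (illustrative): transfer_time is in milliseconds, so moving
 * 2 GiB in 20000 ms gives
 *   mbps = (2147483648 * 8.0) / 20000 / 1000 ~= 859  (megabits per second).
 */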
3516 static void update_iteration_initial_status(MigrationState *s)
3519 * Update these three fields at the same time to avoid mismatched info
3520 * leading to wrong speed calculations.
3522 s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3523 s->iteration_initial_bytes = migration_total_bytes(s);
3524 s->iteration_initial_pages = ram_get_total_transferred_pages();
3527 static void migration_update_counters(MigrationState *s,
3528 int64_t current_time)
3530 uint64_t transferred, transferred_pages, time_spent;
3531 uint64_t current_bytes; /* bytes transferred since the beginning */
3532 double bandwidth;
3534 if (current_time < s->iteration_start_time + BUFFER_DELAY) {
3535 return;
3538 current_bytes = migration_total_bytes(s);
3539 transferred = current_bytes - s->iteration_initial_bytes;
3540 time_spent = current_time - s->iteration_start_time;
3541 bandwidth = (double)transferred / time_spent;
3542 s->threshold_size = bandwidth * s->parameters.downtime_limit;
3544 s->mbps = (((double) transferred * 8.0) /
3545 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
3547 transferred_pages = ram_get_total_transferred_pages() -
3548 s->iteration_initial_pages;
3549 s->pages_per_second = (double) transferred_pages /
3550 (((double) time_spent / 1000.0));
3553 * if we haven't sent anything, we don't want to
3554 * recalculate. 10000 is a small enough number for our purposes
3556 if (ram_counters.dirty_pages_rate && transferred > 10000) {
3557 s->expected_downtime = ram_counters.remaining / bandwidth;
3560 qemu_file_reset_rate_limit(s->to_dst_file);
3562 update_iteration_initial_status(s);
3564 trace_migrate_transferred(transferred, time_spent,
3565 bandwidth, s->threshold_size);
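/*
 * Worked example (illustrative): if 120 MB were transferred in a 100 ms
 * window, bandwidth = 120e6 / 100 = 1.2e6 bytes/ms; with a downtime_limit of
 * 300 ms, threshold_size = 3.6e8 bytes, i.e. migration_iteration_run() can
 * enter completion once the pending data drops below ~360 MB.
 */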
3568 /* Migration thread iteration status */
3569 typedef enum {
3570 MIG_ITERATE_RESUME, /* Resume current iteration */
3571 MIG_ITERATE_SKIP, /* Skip current iteration */
3572 MIG_ITERATE_BREAK, /* Break the loop */
3573 } MigIterateState;
3576 * Return true if we should continue to the next iteration directly, false
3577 * otherwise.
3579 static MigIterateState migration_iteration_run(MigrationState *s)
3581 uint64_t pending_size, pend_pre, pend_compat, pend_post;
3582 bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
3584 qemu_savevm_state_pending(s->to_dst_file, s->threshold_size, &pend_pre,
3585 &pend_compat, &pend_post);
3586 pending_size = pend_pre + pend_compat + pend_post;
3588 trace_migrate_pending(pending_size, s->threshold_size,
3589 pend_pre, pend_compat, pend_post);
3591 if (pending_size && pending_size >= s->threshold_size) {
3592 /* Still a significant amount to transfer */
3593 if (!in_postcopy && pend_pre <= s->threshold_size &&
3594 qatomic_read(&s->start_postcopy)) {
3595 if (postcopy_start(s)) {
3596 error_report("%s: postcopy failed to start", __func__);
3598 return MIG_ITERATE_SKIP;
3600 /* Just another iteration step */
3601 qemu_savevm_state_iterate(s->to_dst_file, in_postcopy);
3602 } else {
3603 trace_migration_thread_low_pending(pending_size);
3604 migration_completion(s);
3605 return MIG_ITERATE_BREAK;
3608 return MIG_ITERATE_RESUME;
3611 static void migration_iteration_finish(MigrationState *s)
3613 /* If we enabled cpu throttling for auto-converge, turn it off. */
3614 cpu_throttle_stop();
3616 qemu_mutex_lock_iothread();
3617 switch (s->state) {
3618 case MIGRATION_STATUS_COMPLETED:
3619 migration_calculate_complete(s);
3620 runstate_set(RUN_STATE_POSTMIGRATE);
3621 break;
3622 case MIGRATION_STATUS_COLO:
3623 if (!migrate_colo_enabled()) {
3624 error_report("%s: critical error: calling COLO code without "
3625 "COLO enabled", __func__);
3627 migrate_start_colo_process(s);
3628 s->vm_was_running = true;
3629 /* Fallthrough */
3630 case MIGRATION_STATUS_FAILED:
3631 case MIGRATION_STATUS_CANCELLED:
3632 case MIGRATION_STATUS_CANCELLING:
3633 if (s->vm_was_running) {
3634 if (!runstate_check(RUN_STATE_SHUTDOWN)) {
3635 vm_start();
3637 } else {
3638 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
3639 runstate_set(RUN_STATE_POSTMIGRATE);
3642 break;
3644 default:
3645 /* Should not reach here, but if so, forgive the VM. */
3646 error_report("%s: Unknown ending state %d", __func__, s->state);
3647 break;
3649 migrate_fd_cleanup_schedule(s);
3650 qemu_mutex_unlock_iothread();
3653 static void bg_migration_iteration_finish(MigrationState *s)
3655 qemu_mutex_lock_iothread();
3656 switch (s->state) {
3657 case MIGRATION_STATUS_COMPLETED:
3658 migration_calculate_complete(s);
3659 break;
3661 case MIGRATION_STATUS_ACTIVE:
3662 case MIGRATION_STATUS_FAILED:
3663 case MIGRATION_STATUS_CANCELLED:
3664 case MIGRATION_STATUS_CANCELLING:
3665 break;
3667 default:
3668 /* Should not reach here, but if so, forgive the VM. */
3669 error_report("%s: Unknown ending state %d", __func__, s->state);
3670 break;
3673 migrate_fd_cleanup_schedule(s);
3674 qemu_mutex_unlock_iothread();
3678 * Return true if we should continue to the next iteration directly, false
3679 * otherwise.
3681 static MigIterateState bg_migration_iteration_run(MigrationState *s)
3683 int res;
3685 res = qemu_savevm_state_iterate(s->to_dst_file, false);
3686 if (res > 0) {
3687 bg_migration_completion(s);
3688 return MIG_ITERATE_BREAK;
3691 return MIG_ITERATE_RESUME;
3694 void migration_make_urgent_request(void)
3696 qemu_sem_post(&migrate_get_current()->rate_limit_sem);
3699 void migration_consume_urgent_request(void)
3701 qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
3704 /* Returns true if the rate limiting was broken by an urgent request */
3705 bool migration_rate_limit(void)
3707 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3708 MigrationState *s = migrate_get_current();
3710 bool urgent = false;
3711 migration_update_counters(s, now);
3712 if (qemu_file_rate_limit(s->to_dst_file)) {
3714 if (qemu_file_get_error(s->to_dst_file)) {
3715 return false;
3718 * Wait for a delay to do rate limiting OR
3719 * something urgent to post the semaphore.
3721 int ms = s->iteration_start_time + BUFFER_DELAY - now;
3722 trace_migration_rate_limit_pre(ms);
3723 if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
3725 * We were woken by one or more urgent things but
3726 * the timedwait will have consumed one of them.
3727 * The service routine for the urgent wake will dec
3728 * the semaphore itself for each item it consumes,
3729 * so add back the one we just consumed.
3731 qemu_sem_post(&s->rate_limit_sem);
3732 urgent = true;
3734 trace_migration_rate_limit_post(urgent);
3736 return urgent;
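/*
 * Illustrative sketch (not from the original sources): the expected calling
 * pattern from a save loop, with a hypothetical page-sending helper.
 *
 *   while (pages_left) {
 *       send_one_page(f);                    // hypothetical helper
 *       urgent = migration_rate_limit();     // may sleep up to BUFFER_DELAY
 *       if (urgent) {
 *           // woken early, e.g. by a postcopy page request; prioritise it
 *       }
 *   }
 */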
3740 * If failover devices are present, wait until they are completely
3741 * unplugged.
3744 static void qemu_savevm_wait_unplug(MigrationState *s, int old_state,
3745 int new_state)
3747 if (qemu_savevm_state_guest_unplug_pending()) {
3748 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG);
3750 while (s->state == MIGRATION_STATUS_WAIT_UNPLUG &&
3751 qemu_savevm_state_guest_unplug_pending()) {
3752 qemu_sem_timedwait(&s->wait_unplug_sem, 250);
3754 if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) {
3755 int timeout = 120; /* 30 seconds */
3757 * The migration has been cancelled,
3758 * but as we have started an unplug we must wait until the end
3759 * to be able to plug the card back in.
3761 while (timeout-- && qemu_savevm_state_guest_unplug_pending()) {
3762 qemu_sem_timedwait(&s->wait_unplug_sem, 250);
3764 if (qemu_savevm_state_guest_unplug_pending() &&
3765 !qtest_enabled()) {
3766 warn_report("migration: partially unplugged device on "
3767 "failure");
3771 migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state);
3772 } else {
3773 migrate_set_state(&s->state, old_state, new_state);
3778 * Master migration thread on the source VM.
3779 * It drives the migration and pumps the data down the outgoing channel.
3781 static void *migration_thread(void *opaque)
3783 MigrationState *s = opaque;
3784 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
3785 MigThrError thr_error;
3786 bool urgent = false;
3788 rcu_register_thread();
3790 object_ref(OBJECT(s));
3791 update_iteration_initial_status(s);
3793 qemu_savevm_state_header(s->to_dst_file);
3796 * If we opened the return path, we need to make sure dst has it
3797 * opened as well.
3799 if (s->rp_state.rp_thread_created) {
3800 /* Now tell the dest that it should open its end so it can reply */
3801 qemu_savevm_send_open_return_path(s->to_dst_file);
3803 /* And do a ping that will make stuff easier to debug */
3804 qemu_savevm_send_ping(s->to_dst_file, 1);
3807 if (migrate_postcopy()) {
3809 * Tell the destination that we *might* want to do postcopy later;
3810 * if the other end can't do postcopy it should fail now, nice and
3811 * early.
3813 qemu_savevm_send_postcopy_advise(s->to_dst_file);
3816 if (migrate_colo_enabled()) {
3817 /* Notify migration destination that we enable COLO */
3818 qemu_savevm_send_colo_enable(s->to_dst_file);
3821 qemu_savevm_state_setup(s->to_dst_file);
3823 qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
3824 MIGRATION_STATUS_ACTIVE);
3826 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
3828 trace_migration_thread_setup_complete();
3830 while (migration_is_active(s)) {
3831 if (urgent || !qemu_file_rate_limit(s->to_dst_file)) {
3832 MigIterateState iter_state = migration_iteration_run(s);
3833 if (iter_state == MIG_ITERATE_SKIP) {
3834 continue;
3835 } else if (iter_state == MIG_ITERATE_BREAK) {
3836 break;
3841 * Try to detect any kind of failures, and see whether we
3842 * should stop the migration now.
3844 thr_error = migration_detect_error(s);
3845 if (thr_error == MIG_THR_ERR_FATAL) {
3846 /* Stop migration */
3847 break;
3848 } else if (thr_error == MIG_THR_ERR_RECOVERED) {
3850 * Just recovered from e.g. a network failure; reset all
3851 * the local variables. This is important to avoid
3852 * breaking the transferred_bytes and bandwidth calculations.
3854 update_iteration_initial_status(s);
3857 urgent = migration_rate_limit();
3860 trace_migration_thread_after_loop();
3861 migration_iteration_finish(s);
3862 object_unref(OBJECT(s));
3863 rcu_unregister_thread();
3864 return NULL;
3867 static void bg_migration_vm_start_bh(void *opaque)
3869 MigrationState *s = opaque;
3871 qemu_bh_delete(s->vm_start_bh);
3872 s->vm_start_bh = NULL;
3874 vm_start();
3875 s->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->downtime_start;
3879 * Background snapshot thread, based on live migration code.
3880 * This is an alternative implementation of the live migration mechanism,
3881 * introduced specifically to support background snapshots.
3883 * It takes advantage of the userfault_fd write-protection mechanism
3884 * introduced in the v5.7 kernel. Compared to the existing dirty page logging
3885 * migration, much less stream traffic is produced, resulting in smaller
3886 * snapshot images, simply because no page duplicates can get into the stream.
3888 * Another key point is that the generated vmstate stream reflects the machine
3889 * state 'frozen' at the beginning of snapshot creation, whereas with the
3890 * dirty page logging mechanism the saved snapshot is effectively the state
3891 * of the VM at the end of the process.
3893 static void *bg_migration_thread(void *opaque)
3895 MigrationState *s = opaque;
3896 int64_t setup_start;
3897 MigThrError thr_error;
3898 QEMUFile *fb;
3899 bool early_fail = true;
3901 rcu_register_thread();
3902 object_ref(OBJECT(s));
3904 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
3906 setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
3908 * We want to save vmstate for the moment when migration has been
3909 * initiated, but we also want to save the RAM content while the VM is running.
3910 * The RAM content should appear first in the vmstate. So, we first
3911 * stash the non-RAM part of the vmstate to the temporary buffer,
3912 * then write RAM part of the vmstate to the migration stream
3913 * with vCPUs running and, finally, write stashed non-RAM part of
3914 * the vmstate from the buffer to the migration stream.
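/*
 * Illustrative sketch (not from the original sources): the resulting stream
 * layout, following the ordering described above.
 *
 *   [ stream header                          ]  qemu_savevm_state_header()
 *   [ RAM content, written while the VM runs ]  under UFFD-WP write tracking
 *   [ non-RAM device state                   ]  replayed from s->bioc by
 *                                               bg_migration_completion()
 */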
3916 s->bioc = qio_channel_buffer_new(512 * 1024);
3917 qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer");
3918 fb = qemu_fopen_channel_output(QIO_CHANNEL(s->bioc));
3919 object_unref(OBJECT(s->bioc));
3921 update_iteration_initial_status(s);
3924 * Prepare for tracking memory writes with UFFD-WP - populate
3925 * RAM pages before protecting.
3927 #ifdef __linux__
3928 ram_write_tracking_prepare();
3929 #endif
3931 qemu_savevm_state_header(s->to_dst_file);
3932 qemu_savevm_state_setup(s->to_dst_file);
3934 qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
3935 MIGRATION_STATUS_ACTIVE);
3937 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
3939 trace_migration_thread_setup_complete();
3940 s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3942 qemu_mutex_lock_iothread();
3945 * If the VM is currently in a suspended state, then, to make a valid
3946 * runstate transition in vm_stop_force_state(), we need to wake it up.
3948 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
3949 s->vm_was_running = runstate_is_running();
3951 if (global_state_store()) {
3952 goto fail;
3954 /* Forcibly stop VM before saving state of vCPUs and devices */
3955 if (vm_stop_force_state(RUN_STATE_PAUSED)) {
3956 goto fail;
3959 * Put vCPUs in sync with shadow context structures, then
3960 * save their state to channel-buffer along with devices.
3962 cpu_synchronize_all_states();
3963 if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
3964 goto fail;
3967 * Since we are going to get non-iterable state data directly
3968 * from s->bioc->data, an explicit flush is needed here.
3970 qemu_fflush(fb);
3972 /* Now initialize UFFD context and start tracking RAM writes */
3973 if (ram_write_tracking_start()) {
3974 goto fail;
3976 early_fail = false;
3979 * Start the VM from a BH handler to avoid a write-fault lock here.
3980 * UFFD-WP protection for the whole of RAM is already enabled, so
3981 * calling VM state change notifiers from vm_start() would initiate
3982 * writes to virtio VQ memory, which is in a write-protected region.
3984 s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s);
3985 qemu_bh_schedule(s->vm_start_bh);
3987 qemu_mutex_unlock_iothread();
3989 while (migration_is_active(s)) {
3990 MigIterateState iter_state = bg_migration_iteration_run(s);
3991 if (iter_state == MIG_ITERATE_SKIP) {
3992 continue;
3993 } else if (iter_state == MIG_ITERATE_BREAK) {
3994 break;
3995 }
3997 /*
3998 * Try to detect any kind of failures, and see whether we
3999 * should stop the migration now.
4000 */
4001 thr_error = migration_detect_error(s);
4002 if (thr_error == MIG_THR_ERR_FATAL) {
4003 /* Stop migration */
4004 break;
4005 }
4007 migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
4008 }
4010 trace_migration_thread_after_loop();
4012 fail:
4013 if (early_fail) {
4014 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
4015 MIGRATION_STATUS_FAILED);
4016 qemu_mutex_unlock_iothread();
4017 }
4019 bg_migration_iteration_finish(s);
4021 qemu_fclose(fb);
4022 object_unref(OBJECT(s));
4023 rcu_unregister_thread();
4025 return NULL;
4026 }
4028 void migrate_fd_connect(MigrationState *s, Error *error_in)
4029 {
4030 Error *local_err = NULL;
4031 int64_t rate_limit;
4032 bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;
4034 /*
4035 * If there's a previous error, free it and prepare for another one.
4036 * Meanwhile if migration completes successfully, there won't be an error
4037 * dumped when calling migrate_fd_cleanup().
4038 */
4039 migrate_error_free(s);
4041 s->expected_downtime = s->parameters.downtime_limit;
4042 if (resume) {
4043 assert(s->cleanup_bh);
4044 } else {
4045 assert(!s->cleanup_bh);
4046 s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
4047 }
4048 if (error_in) {
4049 migrate_fd_error(s, error_in);
4050 if (resume) {
4051 /*
4052 * Don't do cleanup for resume if channel is invalid, but only dump
4053 * the error. We wait for another channel connect from the user.
4054 * The error_report still gives the HMP user a hint on what failed.
4055 * It's normally done in migrate_fd_cleanup(), but call it here
4056 * explicitly.
4057 */
4058 error_report_err(error_copy(s->error));
4059 } else {
4060 migrate_fd_cleanup(s);
4061 }
4062 return;
4063 }
4065 if (resume) {
4066 /* This is a resumed migration */
4067 rate_limit = s->parameters.max_postcopy_bandwidth /
4068 XFER_LIMIT_RATIO;
4069 } else {
4070 /* This is a fresh new migration */
4071 rate_limit = s->parameters.max_bandwidth / XFER_LIMIT_RATIO;
4072 }
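/*
 * max_bandwidth / max_postcopy_bandwidth are in bytes per second, while the
 * QEMUFile rate limit is accounted per BUFFER_DELAY millisecond time slice;
 * dividing by XFER_LIMIT_RATIO converts between the two.
 */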
4073 /* Notify before starting migration thread */
4074 notifier_list_notify(&migration_state_notifiers, s);
4077 qemu_file_set_rate_limit(s->to_dst_file, rate_limit);
4078 qemu_file_set_blocking(s->to_dst_file, true);
4080 /*
4081 * Open the return path. For postcopy, it is used exclusively. For
4082 * precopy, QEMU uses the return path only if the user specified the
4083 * "return-path" capability.
4084 */
4085 if (migrate_postcopy_ram() || migrate_use_return_path()) {
4086 if (open_return_path_on_source(s, !resume)) {
4087 error_report("Unable to open return-path for postcopy");
4088 migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
4089 migrate_fd_cleanup(s);
4090 return;
4091 }
4092 }
4094 if (resume) {
4095 /* Wakeup the main migration thread to do the recovery */
4096 migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
4097 MIGRATION_STATUS_POSTCOPY_RECOVER);
4098 qemu_sem_post(&s->postcopy_pause_sem);
4099 return;
4100 }
4102 if (multifd_save_setup(&local_err) != 0) {
4103 error_report_err(local_err);
4104 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
4105 MIGRATION_STATUS_FAILED);
4106 migrate_fd_cleanup(s);
4107 return;
4108 }
4110 if (migrate_background_snapshot()) {
4111 qemu_thread_create(&s->thread, "bg_snapshot",
4112 bg_migration_thread, s, QEMU_THREAD_JOINABLE);
4113 } else {
4114 qemu_thread_create(&s->thread, "live_migration",
4115 migration_thread, s, QEMU_THREAD_JOINABLE);
4116 }
4117 s->migration_thread_running = true;
4118 }
4120 void migration_global_dump(Monitor *mon)
4121 {
4122 MigrationState *ms = migrate_get_current();
4124 monitor_printf(mon, "globals:\n");
4125 monitor_printf(mon, "store-global-state: %s\n",
4126 ms->store_global_state ? "on" : "off");
4127 monitor_printf(mon, "only-migratable: %s\n",
4128 only_migratable ? "on" : "off");
4129 monitor_printf(mon, "send-configuration: %s\n",
4130 ms->send_configuration ? "on" : "off");
4131 monitor_printf(mon, "send-section-footer: %s\n",
4132 ms->send_section_footer ? "on" : "off");
4133 monitor_printf(mon, "decompress-error-check: %s\n",
4134 ms->decompress_error_check ? "on" : "off");
4135 monitor_printf(mon, "clear-bitmap-shift: %u\n",
4136 ms->clear_bitmap_shift);
4137 }
4139 #define DEFINE_PROP_MIG_CAP(name, x) \
4140 DEFINE_PROP_BOOL(name, MigrationState, enabled_capabilities[x], false)
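/*
 * Each capability property is backed by the corresponding slot in
 * MigrationState::enabled_capabilities[]; per the usual QEMU convention, the
 * "x-" prefix marks a property name as experimental/unstable.
 */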
4142 static Property migration_properties[] = {
4143 DEFINE_PROP_BOOL("store-global-state", MigrationState,
4144 store_global_state, true),
4145 DEFINE_PROP_BOOL("send-configuration", MigrationState,
4146 send_configuration, true),
4147 DEFINE_PROP_BOOL("send-section-footer", MigrationState,
4148 send_section_footer, true),
4149 DEFINE_PROP_BOOL("decompress-error-check", MigrationState,
4150 decompress_error_check, true),
4151 DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState,
4152 clear_bitmap_shift, CLEAR_BITMAP_SHIFT_DEFAULT),
4154 /* Migration parameters */
4155 DEFINE_PROP_UINT8("x-compress-level", MigrationState,
4156 parameters.compress_level,
4157 DEFAULT_MIGRATE_COMPRESS_LEVEL),
4158 DEFINE_PROP_UINT8("x-compress-threads", MigrationState,
4159 parameters.compress_threads,
4160 DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT),
4161 DEFINE_PROP_BOOL("x-compress-wait-thread", MigrationState,
4162 parameters.compress_wait_thread, true),
4163 DEFINE_PROP_UINT8("x-decompress-threads", MigrationState,
4164 parameters.decompress_threads,
4165 DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
4166 DEFINE_PROP_UINT8("x-throttle-trigger-threshold", MigrationState,
4167 parameters.throttle_trigger_threshold,
4168 DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD),
4169 DEFINE_PROP_UINT8("x-cpu-throttle-initial", MigrationState,
4170 parameters.cpu_throttle_initial,
4171 DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL),
4172 DEFINE_PROP_UINT8("x-cpu-throttle-increment", MigrationState,
4173 parameters.cpu_throttle_increment,
4174 DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT),
4175 DEFINE_PROP_BOOL("x-cpu-throttle-tailslow", MigrationState,
4176 parameters.cpu_throttle_tailslow, false),
4177 DEFINE_PROP_SIZE("x-max-bandwidth", MigrationState,
4178 parameters.max_bandwidth, MAX_THROTTLE),
4179 DEFINE_PROP_UINT64("x-downtime-limit", MigrationState,
4180 parameters.downtime_limit,
4181 DEFAULT_MIGRATE_SET_DOWNTIME),
4182 DEFINE_PROP_UINT32("x-checkpoint-delay", MigrationState,
4183 parameters.x_checkpoint_delay,
4184 DEFAULT_MIGRATE_X_CHECKPOINT_DELAY),
4185 DEFINE_PROP_UINT8("multifd-channels", MigrationState,
4186 parameters.multifd_channels,
4187 DEFAULT_MIGRATE_MULTIFD_CHANNELS),
4188 DEFINE_PROP_MULTIFD_COMPRESSION("multifd-compression", MigrationState,
4189 parameters.multifd_compression,
4190 DEFAULT_MIGRATE_MULTIFD_COMPRESSION),
4191 DEFINE_PROP_UINT8("multifd-zlib-level", MigrationState,
4192 parameters.multifd_zlib_level,
4193 DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL),
4194 DEFINE_PROP_UINT8("multifd-zstd-level", MigrationState,
4195 parameters.multifd_zstd_level,
4196 DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL),
4197 DEFINE_PROP_SIZE("xbzrle-cache-size", MigrationState,
4198 parameters.xbzrle_cache_size,
4199 DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE),
4200 DEFINE_PROP_SIZE("max-postcopy-bandwidth", MigrationState,
4201 parameters.max_postcopy_bandwidth,
4202 DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH),
4203 DEFINE_PROP_UINT8("max-cpu-throttle", MigrationState,
4204 parameters.max_cpu_throttle,
4205 DEFAULT_MIGRATE_MAX_CPU_THROTTLE),
4206 DEFINE_PROP_SIZE("announce-initial", MigrationState,
4207 parameters.announce_initial,
4208 DEFAULT_MIGRATE_ANNOUNCE_INITIAL),
4209 DEFINE_PROP_SIZE("announce-max", MigrationState,
4210 parameters.announce_max,
4211 DEFAULT_MIGRATE_ANNOUNCE_MAX),
4212 DEFINE_PROP_SIZE("announce-rounds", MigrationState,
4213 parameters.announce_rounds,
4214 DEFAULT_MIGRATE_ANNOUNCE_ROUNDS),
4215 DEFINE_PROP_SIZE("announce-step", MigrationState,
4216 parameters.announce_step,
4217 DEFAULT_MIGRATE_ANNOUNCE_STEP),
4219 /* Migration capabilities */
4220 DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE),
4221 DEFINE_PROP_MIG_CAP("x-rdma-pin-all", MIGRATION_CAPABILITY_RDMA_PIN_ALL),
4222 DEFINE_PROP_MIG_CAP("x-auto-converge", MIGRATION_CAPABILITY_AUTO_CONVERGE),
4223 DEFINE_PROP_MIG_CAP("x-zero-blocks", MIGRATION_CAPABILITY_ZERO_BLOCKS),
4224 DEFINE_PROP_MIG_CAP("x-compress", MIGRATION_CAPABILITY_COMPRESS),
4225 DEFINE_PROP_MIG_CAP("x-events", MIGRATION_CAPABILITY_EVENTS),
4226 DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM),
4227 DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO),
4228 DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM),
4229 DEFINE_PROP_MIG_CAP("x-block", MIGRATION_CAPABILITY_BLOCK),
4230 DEFINE_PROP_MIG_CAP("x-return-path", MIGRATION_CAPABILITY_RETURN_PATH),
4231 DEFINE_PROP_MIG_CAP("x-multifd", MIGRATION_CAPABILITY_MULTIFD),
4232 DEFINE_PROP_MIG_CAP("x-background-snapshot",
4233 MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT),
4235 DEFINE_PROP_END_OF_LIST(),
4236 };
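/*
 * Because TYPE_MIGRATION is (for now) derived from TYPE_DEVICE, the
 * properties above can also be set on the command line with
 * "-global migration.<property>=<value>"; see the NOTE/TODO on
 * migration_type below.
 */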
4238 static void migration_class_init(ObjectClass *klass, void *data)
4239 {
4240 DeviceClass *dc = DEVICE_CLASS(klass);
4242 dc->user_creatable = false;
4243 device_class_set_props(dc, migration_properties);
4244 }
4246 static void migration_instance_finalize(Object *obj)
4247 {
4248 MigrationState *ms = MIGRATION_OBJ(obj);
4249 MigrationParameters *params = &ms->parameters;
4251 qemu_mutex_destroy(&ms->error_mutex);
4252 qemu_mutex_destroy(&ms->qemu_file_lock);
4253 g_free(params->tls_hostname);
4254 g_free(params->tls_creds);
4255 qemu_sem_destroy(&ms->wait_unplug_sem);
4256 qemu_sem_destroy(&ms->rate_limit_sem);
4257 qemu_sem_destroy(&ms->pause_sem);
4258 qemu_sem_destroy(&ms->postcopy_pause_sem);
4259 qemu_sem_destroy(&ms->postcopy_pause_rp_sem);
4260 qemu_sem_destroy(&ms->rp_state.rp_sem);
4261 error_free(ms->error);
4262 }
4264 static void migration_instance_init(Object *obj)
4265 {
4266 MigrationState *ms = MIGRATION_OBJ(obj);
4267 MigrationParameters *params = &ms->parameters;
4269 ms->state = MIGRATION_STATUS_NONE;
4270 ms->mbps = -1;
4271 ms->pages_per_second = -1;
4272 qemu_sem_init(&ms->pause_sem, 0);
4273 qemu_mutex_init(&ms->error_mutex);
4275 params->tls_hostname = g_strdup("");
4276 params->tls_creds = g_strdup("");
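/*
 * Note: tls_hostname/tls_creds start as empty strings rather than NULL,
 * presumably so the parameter code can treat them as always-present strings
 * and migration_instance_finalize() can free them unconditionally.
 */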
4278 /* Set has_* up only for parameter checks */
4279 params->has_compress_level = true;
4280 params->has_compress_threads = true;
4281 params->has_decompress_threads = true;
4282 params->has_throttle_trigger_threshold = true;
4283 params->has_cpu_throttle_initial = true;
4284 params->has_cpu_throttle_increment = true;
4285 params->has_cpu_throttle_tailslow = true;
4286 params->has_max_bandwidth = true;
4287 params->has_downtime_limit = true;
4288 params->has_x_checkpoint_delay = true;
4289 params->has_block_incremental = true;
4290 params->has_multifd_channels = true;
4291 params->has_multifd_compression = true;
4292 params->has_multifd_zlib_level = true;
4293 params->has_multifd_zstd_level = true;
4294 params->has_xbzrle_cache_size = true;
4295 params->has_max_postcopy_bandwidth = true;
4296 params->has_max_cpu_throttle = true;
4297 params->has_announce_initial = true;
4298 params->has_announce_max = true;
4299 params->has_announce_rounds = true;
4300 params->has_announce_step = true;
4302 qemu_sem_init(&ms->postcopy_pause_sem, 0);
4303 qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
4304 qemu_sem_init(&ms->rp_state.rp_sem, 0);
4305 qemu_sem_init(&ms->rate_limit_sem, 0);
4306 qemu_sem_init(&ms->wait_unplug_sem, 0);
4307 qemu_mutex_init(&ms->qemu_file_lock);
4308 }
4310 /*
4311 * Return true if the check passes, false otherwise. Error will be put
4312 * inside errp if provided.
4313 */
4314 static bool migration_object_check(MigrationState *ms, Error **errp)
4315 {
4316 MigrationCapabilityStatusList *head = NULL;
4317 /* Assuming all off */
4318 bool cap_list[MIGRATION_CAPABILITY__MAX] = { 0 }, ret;
4319 int i;
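/*
 * cap_list starts with every capability off; migrate_caps_check() validates
 * the currently enabled capabilities (collected into 'head') against that
 * all-off baseline.
 */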
4321 if (!migrate_params_check(&ms->parameters, errp)) {
4322 return false;
4323 }
4325 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
4326 if (ms->enabled_capabilities[i]) {
4327 QAPI_LIST_PREPEND(head, migrate_cap_add(i, true));
4328 }
4329 }
4331 ret = migrate_caps_check(cap_list, head, errp);
4333 /* It works with head == NULL */
4334 qapi_free_MigrationCapabilityStatusList(head);
4336 return ret;
4337 }
4339 static const TypeInfo migration_type = {
4340 .name = TYPE_MIGRATION,
4341 /*
4342 * NOTE: TYPE_MIGRATION is not really a device, as the object is
4343 * not created using qdev_new(), it is not attached to the qdev
4344 * device tree, and it is never realized.
4345 *
4346 * TODO: Make this TYPE_OBJECT once QOM provides something like
4347 * TYPE_DEVICE's "-global" properties.
4348 */
4349 .parent = TYPE_DEVICE,
4350 .class_init = migration_class_init,
4351 .class_size = sizeof(MigrationClass),
4352 .instance_size = sizeof(MigrationState),
4353 .instance_init = migration_instance_init,
4354 .instance_finalize = migration_instance_finalize,
4355 };
4357 static void register_migration_types(void)
4358 {
4359 type_register_static(&migration_type);
4360 }
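/*
 * type_init() registers this hook with the QOM module-init machinery, so the
 * TYPE_MIGRATION type exists before the singleton MigrationState object is
 * created during startup.
 */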
4362 type_init(register_migration_types);