/*
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
14 #ifndef QEMU_MIGRATION_H
15 #define QEMU_MIGRATION_H
17 #include "qapi/qmp/qdict.h"
18 #include "qemu-common.h"
19 #include "qemu/thread.h"
20 #include "qemu/notify.h"
21 #include "migration/vmstate.h"
22 #include "qapi-types.h"
23 #include "exec/cpu-common.h"
24 #include "qemu/coroutine_int.h"
25 #include "qom/object.h"
/* Migration stream signature: 0x5145564d is ASCII "QEVM" */
#define QEMU_VM_FILE_MAGIC           0x5145564d
#define QEMU_VM_FILE_VERSION_COMPAT  0x00000002
#define QEMU_VM_FILE_VERSION         0x00000003

/* Section type bytes found in the migration stream */
#define QEMU_VM_EOF                  0x00
#define QEMU_VM_SECTION_START        0x01
#define QEMU_VM_SECTION_PART         0x02
#define QEMU_VM_SECTION_END          0x03
#define QEMU_VM_SECTION_FULL         0x04
#define QEMU_VM_SUBSECTION           0x05
#define QEMU_VM_VMDESCRIPTION        0x06
#define QEMU_VM_CONFIGURATION        0x07
#define QEMU_VM_COMMAND              0x08
#define QEMU_VM_SECTION_FOOTER       0x7e
/* NOTE(review): presumably set by the '-only-migratable' command-line option
 * so that devices that would block migration refuse to be created — confirm
 * against vl.c. Defined in migration.c. */
extern int only_migratable;
45 struct MigrationParams
{
50 /* Messages sent on the return path from destination to source */
51 enum mig_rp_message_type
{
52 MIG_RP_MSG_INVALID
= 0, /* Must be 0 */
53 MIG_RP_MSG_SHUT
, /* sibling will not send any more RP messages */
54 MIG_RP_MSG_PONG
, /* Response to a PING; data (seq: be32 ) */
56 MIG_RP_MSG_REQ_PAGES_ID
, /* data (start: be64, len: be32, id: string) */
57 MIG_RP_MSG_REQ_PAGES
, /* data (start: be64, len: be32) */
62 typedef QLIST_HEAD(, LoadStateEntry
) LoadStateEntry_Head
;
64 /* The current postcopy state is read/set by postcopy_state_get/set
65 * which update it atomically.
66 * The state is updated as postcopy messages are received, and
67 * in general only one thread should be writing to the state at any one
68 * time, initially the main thread and then the listen thread;
69 * Corner cases are where either thread finishes early and/or errors.
70 * The state is checked as messages are received to ensure that
71 * the source is sending us messages in the correct order.
72 * The state is also used by the RAM reception code to know if it
73 * has to place pages atomically, and the cleanup code at the end of
74 * the main thread to know if it has to delay cleanup until the end
78 POSTCOPY_INCOMING_NONE
= 0, /* Initial state - no postcopy */
79 POSTCOPY_INCOMING_ADVISE
,
80 POSTCOPY_INCOMING_DISCARD
,
81 POSTCOPY_INCOMING_LISTENING
,
82 POSTCOPY_INCOMING_RUNNING
,
86 /* State for the incoming migration */
87 struct MigrationIncomingState
{
88 QEMUFile
*from_src_file
;
91 * Free at the start of the main state load, set as the main thread finishes
94 QemuEvent main_thread_load_event
;
96 size_t largest_page_size
;
97 bool have_fault_thread
;
98 QemuThread fault_thread
;
99 QemuSemaphore fault_thread_sem
;
101 bool have_listen_thread
;
102 QemuThread listen_thread
;
103 QemuSemaphore listen_thread_sem
;
105 /* For the kernel to send us notifications */
107 /* To tell the fault_thread to quit */
108 int userfault_quit_fd
;
109 QEMUFile
*to_src_file
;
110 QemuMutex rp_mutex
; /* We send replies from multiple threads */
111 void *postcopy_tmp_page
;
112 void *postcopy_tmp_zero_page
;
118 bool have_colo_incoming_thread
;
119 QemuThread colo_incoming_thread
;
120 /* The coroutine we should enter (back) after failover */
121 Coroutine
*migration_incoming_co
;
122 QemuSemaphore colo_incoming_sem
;
125 LoadStateEntry_Head loadvm_handlers
;
128 MigrationIncomingState
*migration_incoming_get_current(void);
129 void migration_incoming_state_destroy(void);
132 * An outstanding page request, on the source, having been received
135 struct MigrationSrcPageRequest
{
140 QSIMPLEQ_ENTRY(MigrationSrcPageRequest
) next_req
;
143 struct MigrationState
149 QEMUFile
*to_dst_file
;
151 /* New style params from 'migrate-set-parameters' */
152 MigrationParameters parameters
;
155 /* Old style params from 'migrate' command */
156 MigrationParams params
;
158 /* State related to return path */
160 QEMUFile
*from_dst_file
;
161 QemuThread rp_thread
;
168 int64_t expected_downtime
;
169 int64_t dirty_pages_rate
;
170 int64_t dirty_bytes_rate
;
171 bool enabled_capabilities
[MIGRATION_CAPABILITY__MAX
];
172 int64_t xbzrle_cache_size
;
174 int64_t dirty_sync_count
;
175 /* Count of requests incoming from destination */
176 int64_t postcopy_requests
;
178 /* Flag set once the migration has been asked to enter postcopy */
180 /* Flag set after postcopy has sent the device state */
181 bool postcopy_after_devices
;
183 /* Flag set once the migration thread is running (and needs joining) */
184 bool migration_thread_running
;
186 /* Flag set once the migration thread called bdrv_inactivate_all */
189 /* Queue of outstanding page requests from the destination */
190 QemuMutex src_page_req_mutex
;
191 QSIMPLEQ_HEAD(src_page_requests
, MigrationSrcPageRequest
) src_page_requests
;
192 /* The RAMBlock used in the last src_page_request */
193 RAMBlock
*last_req_rb
;
194 /* The semaphore is used to notify COLO thread that failover is finished */
195 QemuSemaphore colo_exit_sem
;
197 /* The semaphore is used to notify COLO thread to do checkpoint */
198 QemuSemaphore colo_checkpoint_sem
;
199 int64_t colo_checkpoint_time
;
200 QEMUTimer
*colo_delay_timer
;
202 /* The last error that occurred */
206 void migrate_set_state(int *state
, int old_state
, int new_state
);
208 void migration_fd_process_incoming(QEMUFile
*f
);
210 void qemu_start_incoming_migration(const char *uri
, Error
**errp
);
212 void migration_channel_process_incoming(MigrationState
*s
,
215 void migration_tls_channel_process_incoming(MigrationState
*s
,
219 void migration_channel_connect(MigrationState
*s
,
221 const char *hostname
);
223 void migration_tls_channel_connect(MigrationState
*s
,
225 const char *hostname
,
228 uint64_t migrate_max_downtime(void);
230 void exec_start_incoming_migration(const char *host_port
, Error
**errp
);
232 void exec_start_outgoing_migration(MigrationState
*s
, const char *host_port
, Error
**errp
);
234 void tcp_start_incoming_migration(const char *host_port
, Error
**errp
);
236 void tcp_start_outgoing_migration(MigrationState
*s
, const char *host_port
, Error
**errp
);
238 void unix_start_incoming_migration(const char *path
, Error
**errp
);
240 void unix_start_outgoing_migration(MigrationState
*s
, const char *path
, Error
**errp
);
242 void fd_start_incoming_migration(const char *path
, Error
**errp
);
244 void fd_start_outgoing_migration(MigrationState
*s
, const char *fdname
, Error
**errp
);
246 void rdma_start_outgoing_migration(void *opaque
, const char *host_port
, Error
**errp
);
248 void rdma_start_incoming_migration(const char *host_port
, Error
**errp
);
250 void migrate_fd_error(MigrationState
*s
, const Error
*error
);
252 void migrate_fd_connect(MigrationState
*s
);
254 void add_migration_state_change_notifier(Notifier
*notify
);
255 void remove_migration_state_change_notifier(Notifier
*notify
);
256 MigrationState
*migrate_init(const MigrationParams
*params
);
257 bool migration_is_blocked(Error
**errp
);
258 bool migration_in_setup(MigrationState
*);
259 bool migration_is_idle(MigrationState
*s
);
260 bool migration_has_finished(MigrationState
*);
261 bool migration_has_failed(MigrationState
*);
262 /* True if outgoing migration has entered postcopy phase */
263 bool migration_in_postcopy(MigrationState
*);
264 /* ...and after the device transmission */
265 bool migration_in_postcopy_after_devices(MigrationState
*);
266 MigrationState
*migrate_get_current(void);
268 void migrate_compress_threads_create(void);
269 void migrate_compress_threads_join(void);
270 void migrate_decompress_threads_create(void);
271 void migrate_decompress_threads_join(void);
272 uint64_t ram_bytes_remaining(void);
273 uint64_t ram_bytes_transferred(void);
274 uint64_t ram_bytes_total(void);
275 void free_xbzrle_decoded_buf(void);
277 void acct_update_position(QEMUFile
*f
, size_t size
, bool zero
);
279 uint64_t dup_mig_bytes_transferred(void);
280 uint64_t dup_mig_pages_transferred(void);
281 uint64_t skipped_mig_bytes_transferred(void);
282 uint64_t skipped_mig_pages_transferred(void);
283 uint64_t norm_mig_bytes_transferred(void);
284 uint64_t norm_mig_pages_transferred(void);
285 uint64_t xbzrle_mig_bytes_transferred(void);
286 uint64_t xbzrle_mig_pages_transferred(void);
287 uint64_t xbzrle_mig_pages_overflow(void);
288 uint64_t xbzrle_mig_pages_cache_miss(void);
289 double xbzrle_mig_cache_miss_rate(void);
291 void ram_handle_compressed(void *host
, uint8_t ch
, uint64_t size
);
292 void ram_debug_dump_bitmap(unsigned long *todump
, bool expected
);
293 /* For outgoing discard bitmap */
294 int ram_postcopy_send_discard_bitmap(MigrationState
*ms
);
295 /* For incoming postcopy discard */
296 int ram_discard_range(MigrationIncomingState
*mis
, const char *block_name
,
297 uint64_t start
, size_t length
);
298 int ram_postcopy_incoming_init(MigrationIncomingState
*mis
);
299 void ram_postcopy_migrated_memory_release(MigrationState
*ms
);
302 * @migrate_add_blocker - prevent migration from proceeding
304 * @reason - an error to be returned whenever migration is attempted
306 * @errp - [out] The reason (if any) we cannot block migration right now.
308 * @returns - 0 on success, -EBUSY/-EACCES on failure, with errp set.
310 int migrate_add_blocker(Error
*reason
, Error
**errp
);
313 * @migrate_del_blocker - remove a blocking error from migration
315 * @reason - the error blocking migration
317 void migrate_del_blocker(Error
*reason
);
319 int check_migratable(Object
*obj
, Error
**err
);
/* Capability accessors: each reads the corresponding flag/parameter from
 * the current MigrationState. */
bool migrate_release_ram(void);
bool migrate_postcopy_ram(void);
bool migrate_zero_blocks(void);

bool migrate_auto_converge(void);

/* XBZRLE delta codec: encode the difference between @old_buf and @new_buf
 * (both @slen bytes) into @dst, or apply an encoded delta from @src. */
int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen,
                         uint8_t *dst, int dlen);
int xbzrle_decode_buffer(uint8_t *src, int slen, uint8_t *dst, int dlen);

int migrate_use_xbzrle(void);
int64_t migrate_xbzrle_cache_size(void);
bool migrate_colo_enabled(void);

int64_t xbzrle_cache_resize(int64_t new_size);

bool migrate_use_compression(void);
int migrate_compress_level(void);
int migrate_compress_threads(void);
int migrate_decompress_threads(void);
bool migrate_use_events(void);
343 /* Sending on the return path - generic and then for each message type */
344 void migrate_send_rp_message(MigrationIncomingState
*mis
,
345 enum mig_rp_message_type message_type
,
346 uint16_t len
, void *data
);
347 void migrate_send_rp_shut(MigrationIncomingState
*mis
,
349 void migrate_send_rp_pong(MigrationIncomingState
*mis
,
351 void migrate_send_rp_req_pages(MigrationIncomingState
*mis
, const char* rbname
,
352 ram_addr_t start
, size_t len
);
354 void ram_control_before_iterate(QEMUFile
*f
, uint64_t flags
);
355 void ram_control_after_iterate(QEMUFile
*f
, uint64_t flags
);
356 void ram_control_load_hook(QEMUFile
*f
, uint64_t flags
, void *data
);
/* Whenever this is found in the data stream, the flags
 * will be passed to ram_control_load_hook in the incoming-migration
 * side. This lets before_ram_iterate/after_ram_iterate add
 * transport-specific sections to the RAM migration data.
 */
#define RAM_SAVE_FLAG_HOOK     0x80

/* Special return values from ram_control_save_page(): the transport has no
 * control-path support, or the transfer was queued for later completion. */
#define RAM_SAVE_CONTROL_NOT_SUPP -1000
#define RAM_SAVE_CONTROL_DELAYED  -2000
368 size_t ram_control_save_page(QEMUFile
*f
, ram_addr_t block_offset
,
369 ram_addr_t offset
, size_t size
,
370 uint64_t *bytes_sent
);
372 void ram_mig_init(void);
373 void savevm_skip_section_footers(void);
374 void register_global_state(void);
375 void global_state_set_optional(void);
376 void savevm_skip_configuration(void);
377 int global_state_store(void);
378 void global_state_store_running(void);
380 void flush_page_queue(MigrationState
*ms
);
381 int ram_save_queue_pages(MigrationState
*ms
, const char *rbname
,
382 ram_addr_t start
, ram_addr_t len
);
383 uint64_t ram_pagesize_summary(void);
385 PostcopyState
postcopy_state_get(void);
386 /* Set the state and return the old state */
387 PostcopyState
postcopy_state_set(PostcopyState new_state
);