/*
 * Copyright IBM, Corp. 2008
 *
 * Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#ifndef QEMU_MIGRATION_H
#define QEMU_MIGRATION_H

#include "qapi/qmp/qdict.h"
#include "qemu-common.h"
#include "qemu/thread.h"
#include "qemu/notify.h"
#include "migration/vmstate.h"
#include "qapi-types.h"
#include "exec/cpu-common.h"
#include "qemu/coroutine_int.h"

#define QEMU_VM_FILE_MAGIC           0x5145564d
#define QEMU_VM_FILE_VERSION_COMPAT  0x00000002
#define QEMU_VM_FILE_VERSION         0x00000003

#define QEMU_VM_EOF                  0x00
#define QEMU_VM_SECTION_START        0x01
#define QEMU_VM_SECTION_PART         0x02
#define QEMU_VM_SECTION_END          0x03
#define QEMU_VM_SECTION_FULL         0x04
#define QEMU_VM_SUBSECTION           0x05
#define QEMU_VM_VMDESCRIPTION        0x06
#define QEMU_VM_CONFIGURATION        0x07
#define QEMU_VM_COMMAND              0x08
#define QEMU_VM_SECTION_FOOTER       0x7e

extern int only_migratable;

struct MigrationParams {
    bool blk;
    bool shared;
};

/* Messages sent on the return path from destination to source */
enum mig_rp_message_type {
    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32) */

    MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
    MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */

    MIG_RP_MSG_MAX
};
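
/*
 * Illustrative sketch (not part of the original header): on the destination,
 * a PING from the source is typically answered with MIG_RP_MSG_PONG carrying
 * the same 32-bit sequence value, e.g. via migrate_send_rp_pong() declared
 * further down.  The sequence value shown here is hypothetical:
 *
 *     MigrationIncomingState *mis = migration_incoming_get_current();
 *     uint32_t seq = 42;                  // value received with the PING
 *     migrate_send_rp_pong(mis, seq);     // sends MIG_RP_MSG_PONG(seq: be32)
 */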

typedef QLIST_HEAD(, LoadStateEntry) LoadStateEntry_Head;

/* The current postcopy state is read/set by postcopy_state_get/set
 * which update it atomically.
 * The state is updated as postcopy messages are received, and
 * in general only one thread should be writing to the state at any one
 * time, initially the main thread and then the listen thread;
 * Corner cases are where either thread finishes early and/or errors.
 * The state is checked as messages are received to ensure that
 * the source is sending us messages in the correct order.
 * The state is also used by the RAM reception code to know if it
 * has to place pages atomically, and the cleanup code at the end of
 * the main thread to know if it has to delay cleanup until the end
 * of the listen thread.
 */
typedef enum {
    POSTCOPY_INCOMING_NONE = 0,  /* Initial state - no postcopy */
    POSTCOPY_INCOMING_ADVISE,
    POSTCOPY_INCOMING_DISCARD,
    POSTCOPY_INCOMING_LISTENING,
    POSTCOPY_INCOMING_RUNNING,
    POSTCOPY_INCOMING_END
} PostcopyState;
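
/*
 * Illustrative sketch (not part of the original header): the incoming side is
 * expected to walk these states in order as postcopy commands arrive, using
 * the postcopy_state_get()/postcopy_state_set() accessors declared at the end
 * of this header.  The helper name below is hypothetical:
 *
 *     static int example_handle_listen_cmd(void)
 *     {
 *         // transition to LISTENING, checking the source sent things in order
 *         PostcopyState old = postcopy_state_set(POSTCOPY_INCOMING_LISTENING);
 *
 *         if (old != POSTCOPY_INCOMING_ADVISE &&
 *             old != POSTCOPY_INCOMING_DISCARD) {
 *             error_report("unexpected postcopy state %d on LISTEN", old);
 *             return -1;
 *         }
 *         return 0;
 *     }
 */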

/* State for the incoming migration */
struct MigrationIncomingState {
    QEMUFile *from_src_file;

    /*
     * Free at the start of the main state load, set as the main thread finishes
     * loading state.
     */
    QemuEvent main_thread_load_event;

    bool           have_fault_thread;
    QemuThread     fault_thread;
    QemuSemaphore  fault_thread_sem;

    bool           have_listen_thread;
    QemuThread     listen_thread;
    QemuSemaphore  listen_thread_sem;

    /* For the kernel to send us notifications */
    int            userfault_fd;
    /* To tell the fault_thread to quit */
    int            userfault_quit_fd;
    QEMUFile      *to_src_file;
    QemuMutex      rp_mutex;    /* We send replies from multiple threads */
    void          *postcopy_tmp_page;

    bool           have_colo_incoming_thread;
    QemuThread     colo_incoming_thread;
    /* The coroutine we should enter (back) after failover */
    Coroutine     *migration_incoming_co;

    LoadStateEntry_Head loadvm_handlers;
};

MigrationIncomingState *migration_incoming_get_current(void);
void migration_incoming_state_destroy(void);

/*
 * An outstanding page request, on the source, having been received
 * and queued.
 */
struct MigrationSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(MigrationSrcPageRequest) next_req;
};

struct MigrationState {
    QEMUFile *to_dst_file;

    /* New style params from 'migrate-set-parameters' */
    MigrationParameters parameters;

    /* Old style params from 'migrate' command */
    MigrationParams params;

    /* State related to return path */
    struct {
        QEMUFile   *from_dst_file;
        QemuThread  rp_thread;
        bool        error;
    } rp_state;

    int64_t expected_downtime;
    int64_t dirty_pages_rate;
    int64_t dirty_bytes_rate;
    bool enabled_capabilities[MIGRATION_CAPABILITY__MAX];
    int64_t xbzrle_cache_size;
    int64_t dirty_sync_count;
    /* Count of requests incoming from destination */
    int64_t postcopy_requests;

    /* Flag set once the migration has been asked to enter postcopy */
    bool start_postcopy;
    /* Flag set after postcopy has sent the device state */
    bool postcopy_after_devices;

    /* Flag set once the migration thread is running (and needs joining) */
    bool migration_thread_running;

    /* Flag set once the migration thread called bdrv_inactivate_all */
    bool block_inactive;

    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(src_page_requests, MigrationSrcPageRequest) src_page_requests;
    /* The RAMBlock used in the last src_page_request */
    RAMBlock *last_req_rb;

    /* The last error that occurred */
    Error *error;
};

void migrate_set_state(int *state, int old_state, int new_state);

void migration_fd_process_incoming(QEMUFile *f);

void qemu_start_incoming_migration(const char *uri, Error **errp);

void migration_channel_process_incoming(MigrationState *s,
                                        QIOChannel *ioc);

void migration_tls_channel_process_incoming(MigrationState *s,
                                            QIOChannel *ioc,
                                            Error **errp);

void migration_channel_connect(MigrationState *s,
                               QIOChannel *ioc,
                               const char *hostname);

void migration_tls_channel_connect(MigrationState *s,
                                   QIOChannel *ioc,
                                   const char *hostname,
                                   Error **errp);

uint64_t migrate_max_downtime(void);

void exec_start_incoming_migration(const char *host_port, Error **errp);

void exec_start_outgoing_migration(MigrationState *s, const char *host_port,
                                   Error **errp);

void tcp_start_incoming_migration(const char *host_port, Error **errp);

void tcp_start_outgoing_migration(MigrationState *s, const char *host_port,
                                  Error **errp);

void unix_start_incoming_migration(const char *path, Error **errp);

void unix_start_outgoing_migration(MigrationState *s, const char *path,
                                   Error **errp);

void fd_start_incoming_migration(const char *path, Error **errp);

void fd_start_outgoing_migration(MigrationState *s, const char *fdname,
                                 Error **errp);

void rdma_start_outgoing_migration(void *opaque, const char *host_port,
                                   Error **errp);

void rdma_start_incoming_migration(const char *host_port, Error **errp);

void migrate_fd_error(MigrationState *s, const Error *error);

void migrate_fd_connect(MigrationState *s);

void add_migration_state_change_notifier(Notifier *notify);
void remove_migration_state_change_notifier(Notifier *notify);
MigrationState *migrate_init(const MigrationParams *params);
bool migration_is_blocked(Error **errp);
bool migration_in_setup(MigrationState *);
bool migration_is_idle(MigrationState *s);
bool migration_has_finished(MigrationState *);
bool migration_has_failed(MigrationState *);
/* True if outgoing migration has entered postcopy phase */
bool migration_in_postcopy(MigrationState *);
/* ...and after the device transmission */
bool migration_in_postcopy_after_devices(MigrationState *);
MigrationState *migrate_get_current(void);

void migrate_compress_threads_create(void);
void migrate_compress_threads_join(void);
void migrate_decompress_threads_create(void);
void migrate_decompress_threads_join(void);
uint64_t ram_bytes_remaining(void);
uint64_t ram_bytes_transferred(void);
uint64_t ram_bytes_total(void);
void free_xbzrle_decoded_buf(void);

void acct_update_position(QEMUFile *f, size_t size, bool zero);

uint64_t dup_mig_bytes_transferred(void);
uint64_t dup_mig_pages_transferred(void);
uint64_t skipped_mig_bytes_transferred(void);
uint64_t skipped_mig_pages_transferred(void);
uint64_t norm_mig_bytes_transferred(void);
uint64_t norm_mig_pages_transferred(void);
uint64_t xbzrle_mig_bytes_transferred(void);
uint64_t xbzrle_mig_pages_transferred(void);
uint64_t xbzrle_mig_pages_overflow(void);
uint64_t xbzrle_mig_pages_cache_miss(void);
double xbzrle_mig_cache_miss_rate(void);

void ram_handle_compressed(void *host, uint8_t ch, uint64_t size);
void ram_debug_dump_bitmap(unsigned long *todump, bool expected);
/* For outgoing discard bitmap */
int ram_postcopy_send_discard_bitmap(MigrationState *ms);
/* For incoming postcopy discard */
int ram_discard_range(MigrationIncomingState *mis, const char *block_name,
                      uint64_t start, size_t length);
int ram_postcopy_incoming_init(MigrationIncomingState *mis);

/**
 * @migrate_add_blocker - prevent migration from proceeding
 *
 * @reason - an error to be returned whenever migration is attempted
 *
 * @errp - [out] The reason (if any) we cannot block migration right now.
 *
 * @returns - 0 on success, -EBUSY/-EACCES on failure, with errp set.
 */
int migrate_add_blocker(Error *reason, Error **errp);

/**
 * @migrate_del_blocker - remove a blocking error from migration
 *
 * @reason - the error blocking migration
 */
void migrate_del_blocker(Error *reason);
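
/*
 * Illustrative sketch (not part of the original header): a device that cannot
 * be migrated keeps a blocker registered while that condition holds.  The
 * 'dev_blocker' variable and the error text are hypothetical:
 *
 *     static Error *dev_blocker;
 *
 *     // when the non-migratable feature is enabled:
 *     error_setg(&dev_blocker, "device 'foo' does not support migration");
 *     if (migrate_add_blocker(dev_blocker, errp) < 0) {
 *         error_free(dev_blocker);   // not registered, e.g. a migration
 *         dev_blocker = NULL;        // is already in progress
 *     }
 *
 *     // when the condition clears:
 *     migrate_del_blocker(dev_blocker);
 *     error_free(dev_blocker);
 *     dev_blocker = NULL;
 */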

bool migrate_release_ram(void);
bool migrate_postcopy_ram(void);
bool migrate_zero_blocks(void);

bool migrate_auto_converge(void);

int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen,
                         uint8_t *dst, int dlen);
int xbzrle_decode_buffer(uint8_t *src, int slen, uint8_t *dst, int dlen);
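
/*
 * Illustrative sketch (not part of the original header): XBZRLE encodes the
 * delta between the previously-sent copy of a page and its current content.
 * The return-value handling below is paraphrased and should be checked
 * against the implementation; TARGET_PAGE_SIZE is only assumed for sizing:
 *
 *     uint8_t encoded[TARGET_PAGE_SIZE];
 *     int elen = xbzrle_encode_buffer(old_page, new_page, TARGET_PAGE_SIZE,
 *                                     encoded, sizeof(encoded));
 *     if (elen > 0) {
 *         // send 'elen' bytes; the receiver applies them on top of its
 *         // copy of the old page content:
 *         xbzrle_decode_buffer(encoded, elen, dest_page, TARGET_PAGE_SIZE);
 *     } else {
 *         // delta would not fit: fall back to sending the full page
 *     }
 */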

int migrate_use_xbzrle(void);
int64_t migrate_xbzrle_cache_size(void);
bool migrate_colo_enabled(void);

int64_t xbzrle_cache_resize(int64_t new_size);

bool migrate_use_compression(void);
int migrate_compress_level(void);
int migrate_compress_threads(void);
int migrate_decompress_threads(void);
bool migrate_use_events(void);

/* Sending on the return path - generic and then for each message type */
void migrate_send_rp_message(MigrationIncomingState *mis,
                             enum mig_rp_message_type message_type,
                             uint16_t len, void *data);
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value);
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value);
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
                               ram_addr_t start, size_t len);
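
/*
 * Illustrative sketch (not part of the original header): during postcopy the
 * destination's fault thread resolves a missing page by asking the source for
 * it over the return path.  The RAMBlock name, 'fault_offset' and the page
 * size used here are hypothetical:
 *
 *     MigrationIncomingState *mis = migration_incoming_get_current();
 *
 *     // request one host page of "pc.ram" starting at the faulting offset
 *     migrate_send_rp_req_pages(mis, "pc.ram", fault_offset, getpagesize());
 */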

void ram_control_before_iterate(QEMUFile *f, uint64_t flags);
void ram_control_after_iterate(QEMUFile *f, uint64_t flags);
void ram_control_load_hook(QEMUFile *f, uint64_t flags, void *data);

/* Whenever this is found in the data stream, the flags
 * will be passed to ram_control_load_hook in the incoming-migration
 * side. This lets before_ram_iterate/after_ram_iterate add
 * transport-specific sections to the RAM migration data.
 */
#define RAM_SAVE_FLAG_HOOK     0x80

#define RAM_SAVE_CONTROL_NOT_SUPP -1000
#define RAM_SAVE_CONTROL_DELAYED  -2000

size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
                             ram_addr_t offset, size_t size,
                             uint64_t *bytes_sent);
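
/*
 * Illustrative sketch (not part of the original header): the RAM saver can
 * offer each page to a transport hook (RDMA, for instance) before falling
 * back to the normal stream.  The control flow below is paraphrased:
 *
 *     uint64_t bytes_xmit = 0;
 *     size_t ret = ram_control_save_page(f, block_offset, offset,
 *                                        TARGET_PAGE_SIZE, &bytes_xmit);
 *     if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
 *         // no transport hook registered: send the page inline as usual
 *     } else if (ret != RAM_SAVE_CONTROL_DELAYED) {
 *         // the hook handled the page itself: account 'bytes_xmit' and
 *         // skip the normal path
 *     }
 */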

void ram_mig_init(void);
void savevm_skip_section_footers(void);
void register_global_state(void);
void global_state_set_optional(void);
void savevm_skip_configuration(void);
int global_state_store(void);
void global_state_store_running(void);

void flush_page_queue(MigrationState *ms);
int ram_save_queue_pages(MigrationState *ms, const char *rbname,
                         ram_addr_t start, ram_addr_t len);
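
/*
 * Illustrative sketch (not part of the original header): on the source, the
 * return-path thread reacts to MIG_RP_MSG_REQ_PAGES by queueing the range so
 * the RAM saver sends it ahead of its normal scan.  Values are hypothetical:
 *
 *     // rbname == NULL means "same RAMBlock as the previous request"
 *     if (ram_save_queue_pages(ms, "pc.ram", start, len) < 0) {
 *         // mark the return path as failed
 *     }
 */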

PostcopyState postcopy_state_get(void);
/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state);

#endif