/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#ifndef QEMU_MIGRATION_H
#define QEMU_MIGRATION_H

#include "qemu-common.h"
#include "qemu/thread.h"
#include "qapi-types.h"
#include "exec/cpu-common.h"
#include "qemu/coroutine_int.h"
#include "hw/qdev.h"
#include "io/channel.h"

struct PostcopyBlocktimeContext;

/* State for the incoming migration */
struct MigrationIncomingState {
    QEMUFile *from_src_file;

    /*
     * Free at the start of the main state load, set as the main thread
     * finishes loading state (see the usage sketch after this struct).
     */
    QemuEvent main_thread_load_event;

    size_t largest_page_size;
    bool have_fault_thread;
    QemuThread fault_thread;
    QemuSemaphore fault_thread_sem;

    bool have_listen_thread;
    QemuThread listen_thread;
    QemuSemaphore listen_thread_sem;

    /* For the kernel to send us notifications */
    int userfault_fd;
    /* To tell the fault_thread to quit */
    int userfault_quit_fd;
    QEMUFile *to_src_file;
    QemuMutex rp_mutex;    /* We send replies from multiple threads */
    void *postcopy_tmp_page;
    void *postcopy_tmp_zero_page;

    QEMUBH *bh;

    int state;

    bool have_colo_incoming_thread;
    QemuThread colo_incoming_thread;
    /* The coroutine we should enter (back) after failover */
    Coroutine *migration_incoming_co;
    QemuSemaphore colo_incoming_sem;

    /*
     * PostcopyBlocktimeContext to keep information for postcopy
     * live migration, to calculate vCPU block time
     */
    struct PostcopyBlocktimeContext *blocktime_ctx;
};
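
/*
 * Illustrative sketch only, not part of QEMU: one way a helper thread on the
 * incoming side could block until the main thread has finished loading the
 * migrated state, using the main_thread_load_event field documented above.
 * The function name is hypothetical; qemu_event_wait() is the standard
 * QemuEvent primitive from "qemu/thread.h", and the main thread is expected
 * to signal the event with qemu_event_set() once loading completes.
 */
static inline void
migration_example_wait_main_load(MigrationIncomingState *mis)
{
    qemu_event_wait(&mis->main_thread_load_event);
}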

MigrationIncomingState *migration_incoming_get_current(void);
void migration_incoming_state_destroy(void);
/*
 * Functions to work with blocktime context
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info);

#define TYPE_MIGRATION "migration"

#define MIGRATION_CLASS(klass) \
    OBJECT_CLASS_CHECK(MigrationClass, (klass), TYPE_MIGRATION)
#define MIGRATION_OBJ(obj) \
    OBJECT_CHECK(MigrationState, (obj), TYPE_MIGRATION)
#define MIGRATION_GET_CLASS(obj) \
    OBJECT_GET_CLASS(MigrationClass, (obj), TYPE_MIGRATION)

typedef struct MigrationClass {
    /*< private >*/
    DeviceClass parent_class;
} MigrationClass;

struct MigrationState
{
    /*< private >*/
    DeviceState parent_obj;

    /*< public >*/
    size_t bytes_xfer;
    size_t xfer_limit;
    QemuThread thread;
    QEMUBH *cleanup_bh;
    QEMUFile *to_dst_file;

    /* bytes already sent at the beginning of the current iteration */
    uint64_t iteration_initial_bytes;
    /* time at the start of the current iteration */
    int64_t iteration_start_time;
    /*
     * The final stage happens when the remaining data is smaller than
     * this threshold; it's calculated from the requested downtime and
     * the measured bandwidth (see the worked example after this struct).
     */
    int64_t threshold_size;

    /* params from 'migrate-set-parameters' */
    MigrationParameters parameters;

    int state;

    /* State related to return path */
    struct {
        QEMUFile *from_dst_file;
        QemuThread rp_thread;
        bool error;
    } rp_state;

    double mbps;
    /* Timestamp when recent migration starts (ms) */
    int64_t start_time;
    /* Total time used by latest migration (ms) */
    int64_t total_time;
    /* Timestamp when VM is down (ms) to migrate the last stuff */
    int64_t downtime_start;
    int64_t downtime;
    int64_t expected_downtime;
    bool enabled_capabilities[MIGRATION_CAPABILITY__MAX];
    int64_t setup_time;

    /*
     * Whether the guest was running when we entered the completion stage.
     * If migration is interrupted for any reason, we need to continue
     * running the guest on the source.
     */
    bool vm_was_running;

    /* Flag set once the migration has been asked to enter postcopy */
    bool start_postcopy;
    /* Flag set after postcopy has sent the device state */
    bool postcopy_after_devices;

    /* Flag set once the migration thread is running (and needs joining) */
    bool migration_thread_running;

    /* Flag set once the migration thread called bdrv_inactivate_all */
    bool block_inactive;

    /* Migration is paused due to pause-before-switchover */
    QemuSemaphore pause_sem;

    /* The semaphore is used to notify COLO thread that failover is finished */
    QemuSemaphore colo_exit_sem;

    /* The semaphore is used to notify COLO thread to do checkpoint */
    QemuSemaphore colo_checkpoint_sem;
    int64_t colo_checkpoint_time;
    QEMUTimer *colo_delay_timer;

    /*
     * The first error that has occurred.  We use the mutex to be able to
     * return the first error message.
     */
    Error *error;
    /* mutex to protect errp */
    QemuMutex error_mutex;

    /* Do we have to clean up -b/-i from old migrate parameters */
    /* This feature is deprecated and will be removed */
    bool must_remove_block_options;

    /*
     * Global switch on whether we need to store the global state
     * during migration.
     */
    bool store_global_state;

    /* Whether the VM only allows migratable devices */
    bool only_migratable;

    /* Whether we send QEMU_VM_CONFIGURATION during migration */
    bool send_configuration;
    /* Whether we send section footer during migration */
    bool send_section_footer;
};
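
/*
 * Illustrative sketch only, not part of QEMU: a worked example of the
 * threshold_size calculation described above.  With bandwidth measured in
 * bytes per millisecond and the downtime limit in milliseconds, the
 * threshold is the amount of data that can still be transferred within the
 * permitted downtime.  The function name is hypothetical.
 */
static inline int64_t migration_example_threshold_size(
    double bandwidth_bytes_per_ms, int64_t downtime_limit_ms)
{
    return (int64_t)(bandwidth_bytes_per_ms * downtime_limit_ms);
}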

void migrate_set_state(int *state, int old_state, int new_state);

void migration_fd_process_incoming(QEMUFile *f);
void migration_ioc_process_incoming(QIOChannel *ioc);

bool migration_has_all_channels(void);

uint64_t migrate_max_downtime(void);

void migrate_set_error(MigrationState *s, const Error *error);
void migrate_fd_error(MigrationState *s, const Error *error);

void migrate_fd_connect(MigrationState *s);

MigrationState *migrate_init(void);
bool migration_is_blocked(Error **errp);
/* True if outgoing migration has entered postcopy phase */
bool migration_in_postcopy(void);
MigrationState *migrate_get_current(void);

bool migrate_postcopy(void);

bool migrate_release_ram(void);
bool migrate_postcopy_ram(void);
bool migrate_zero_blocks(void);

bool migrate_auto_converge(void);
bool migrate_use_multifd(void);
bool migrate_pause_before_switchover(void);
int migrate_multifd_channels(void);
int migrate_multifd_page_count(void);

int migrate_use_xbzrle(void);
int64_t migrate_xbzrle_cache_size(void);
bool migrate_colo_enabled(void);

bool migrate_use_block(void);
bool migrate_use_block_incremental(void);
bool migrate_use_return_path(void);

bool migrate_use_compression(void);
int migrate_compress_level(void);
int migrate_compress_threads(void);
int migrate_decompress_threads(void);
bool migrate_use_events(void);
bool migrate_postcopy_blocktime(void);

/* Sending on the return path - generic and then for each message type */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value);
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value);
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
                               ram_addr_t start, size_t len);
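
/*
 * Illustrative sketch only, not part of QEMU: how destination-side code
 * might use the return-path helpers declared above during postcopy, asking
 * the source to send a particular range of a RAM block it has faulted on.
 * The function name is hypothetical; rbname, start and len follow the
 * migrate_send_rp_req_pages() prototype.
 */
static inline void migration_example_request_pages(MigrationIncomingState *mis,
                                                   const char *rbname,
                                                   ram_addr_t start,
                                                   size_t len)
{
    migrate_send_rp_req_pages(mis, rbname, start, len);
}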

#endif