/* migration/migration.h — qemu (ar7.git mirror), blob 8d2f320c485fc2f355b03aa646230e66ffa43b81 */
/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
14 #ifndef QEMU_MIGRATION_H
15 #define QEMU_MIGRATION_H
17 #include "qemu-common.h"
18 #include "qapi/qapi-types-migration.h"
19 #include "qemu/thread.h"
20 #include "exec/cpu-common.h"
21 #include "qemu/coroutine_int.h"
22 #include "hw/qdev.h"
23 #include "io/channel.h"
25 /* State for the incoming migration */
26 struct MigrationIncomingState {
27 QEMUFile *from_src_file;
30 * Free at the start of the main state load, set as the main thread finishes
31 * loading state.
33 QemuEvent main_thread_load_event;
35 size_t largest_page_size;
36 bool have_fault_thread;
37 QemuThread fault_thread;
38 QemuSemaphore fault_thread_sem;
39 /* Set this when we want the fault thread to quit */
40 bool fault_thread_quit;
42 bool have_listen_thread;
43 QemuThread listen_thread;
44 QemuSemaphore listen_thread_sem;
46 /* For the kernel to send us notifications */
47 int userfault_fd;
48 /* To notify the fault_thread to wake, e.g., when need to quit */
49 int userfault_event_fd;
50 QEMUFile *to_src_file;
51 QemuMutex rp_mutex; /* We send replies from multiple threads */
52 /* RAMBlock of last request sent to source */
53 RAMBlock *last_rb;
54 void *postcopy_tmp_page;
55 void *postcopy_tmp_zero_page;
56 /* PostCopyFD's for external userfaultfds & handlers of shared memory */
57 GArray *postcopy_remote_fds;
59 QEMUBH *bh;
61 int state;
63 bool have_colo_incoming_thread;
64 QemuThread colo_incoming_thread;
65 /* The coroutine we should enter (back) after failover */
66 Coroutine *migration_incoming_co;
67 QemuSemaphore colo_incoming_sem;
70 MigrationIncomingState *migration_incoming_get_current(void);
71 void migration_incoming_state_destroy(void);
73 #define TYPE_MIGRATION "migration"
75 #define MIGRATION_CLASS(klass) \
76 OBJECT_CLASS_CHECK(MigrationClass, (klass), TYPE_MIGRATION)
77 #define MIGRATION_OBJ(obj) \
78 OBJECT_CHECK(MigrationState, (obj), TYPE_MIGRATION)
79 #define MIGRATION_GET_CLASS(obj) \
80 OBJECT_GET_CLASS(MigrationClass, (obj), TYPE_MIGRATION)
82 typedef struct MigrationClass {
83 /*< private >*/
84 DeviceClass parent_class;
85 } MigrationClass;
87 struct MigrationState
89 /*< private >*/
90 DeviceState parent_obj;
92 /*< public >*/
93 size_t bytes_xfer;
94 size_t xfer_limit;
95 QemuThread thread;
96 QEMUBH *cleanup_bh;
97 QEMUFile *to_dst_file;
99 /* bytes already send at the beggining of current interation */
100 uint64_t iteration_initial_bytes;
101 /* time at the start of current iteration */
102 int64_t iteration_start_time;
104 * The final stage happens when the remaining data is smaller than
105 * this threshold; it's calculated from the requested downtime and
106 * measured bandwidth
108 int64_t threshold_size;
110 /* params from 'migrate-set-parameters' */
111 MigrationParameters parameters;
113 int state;
115 /* State related to return path */
116 struct {
117 QEMUFile *from_dst_file;
118 QemuThread rp_thread;
119 bool error;
120 } rp_state;
122 double mbps;
123 /* Timestamp when recent migration starts (ms) */
124 int64_t start_time;
125 /* Total time used by latest migration (ms) */
126 int64_t total_time;
127 /* Timestamp when VM is down (ms) to migrate the last stuff */
128 int64_t downtime_start;
129 int64_t downtime;
130 int64_t expected_downtime;
131 bool enabled_capabilities[MIGRATION_CAPABILITY__MAX];
132 int64_t setup_time;
134 * Whether guest was running when we enter the completion stage.
135 * If migration is interrupted by any reason, we need to continue
136 * running the guest on source.
138 bool vm_was_running;
140 /* Flag set once the migration has been asked to enter postcopy */
141 bool start_postcopy;
142 /* Flag set after postcopy has sent the device state */
143 bool postcopy_after_devices;
145 /* Flag set once the migration thread is running (and needs joining) */
146 bool migration_thread_running;
148 /* Flag set once the migration thread called bdrv_inactivate_all */
149 bool block_inactive;
151 /* Migration is paused due to pause-before-switchover */
152 QemuSemaphore pause_sem;
154 /* The semaphore is used to notify COLO thread that failover is finished */
155 QemuSemaphore colo_exit_sem;
157 /* The semaphore is used to notify COLO thread to do checkpoint */
158 QemuSemaphore colo_checkpoint_sem;
159 int64_t colo_checkpoint_time;
160 QEMUTimer *colo_delay_timer;
162 /* The first error that has occurred.
163 We used the mutex to be able to return the 1st error message */
164 Error *error;
165 /* mutex to protect errp */
166 QemuMutex error_mutex;
168 /* Do we have to clean up -b/-i from old migrate parameters */
169 /* This feature is deprecated and will be removed */
170 bool must_remove_block_options;
173 * Global switch on whether we need to store the global state
174 * during migration.
176 bool store_global_state;
178 /* Whether the VM is only allowing for migratable devices */
179 bool only_migratable;
181 /* Whether we send QEMU_VM_CONFIGURATION during migration */
182 bool send_configuration;
183 /* Whether we send section footer during migration */
184 bool send_section_footer;
187 void migrate_set_state(int *state, int old_state, int new_state);
189 void migration_fd_process_incoming(QEMUFile *f);
190 void migration_ioc_process_incoming(QIOChannel *ioc);
192 bool migration_has_all_channels(void);
194 uint64_t migrate_max_downtime(void);
196 void migrate_set_error(MigrationState *s, const Error *error);
197 void migrate_fd_error(MigrationState *s, const Error *error);
199 void migrate_fd_connect(MigrationState *s, Error *error_in);
201 void migrate_init(MigrationState *s);
202 bool migration_is_blocked(Error **errp);
203 /* True if outgoing migration has entered postcopy phase */
204 bool migration_in_postcopy(void);
205 MigrationState *migrate_get_current(void);
207 bool migrate_postcopy(void);
209 bool migrate_release_ram(void);
210 bool migrate_postcopy_ram(void);
211 bool migrate_zero_blocks(void);
212 bool migrate_dirty_bitmaps(void);
214 bool migrate_auto_converge(void);
215 bool migrate_use_multifd(void);
216 bool migrate_pause_before_switchover(void);
217 int migrate_multifd_channels(void);
218 int migrate_multifd_page_count(void);
220 int migrate_use_xbzrle(void);
221 int64_t migrate_xbzrle_cache_size(void);
222 bool migrate_colo_enabled(void);
224 bool migrate_use_block(void);
225 bool migrate_use_block_incremental(void);
226 bool migrate_use_return_path(void);
228 bool migrate_use_compression(void);
229 int migrate_compress_level(void);
230 int migrate_compress_threads(void);
231 int migrate_decompress_threads(void);
232 bool migrate_use_events(void);
234 /* Sending on the return path - generic and then for each message type */
235 void migrate_send_rp_shut(MigrationIncomingState *mis,
236 uint32_t value);
237 void migrate_send_rp_pong(MigrationIncomingState *mis,
238 uint32_t value);
239 int migrate_send_rp_req_pages(MigrationIncomingState *mis, const char* rbname,
240 ram_addr_t start, size_t len);
242 void dirty_bitmap_mig_before_vm_start(void);
243 void init_dirty_bitmap_incoming_migration(void);
245 #endif