/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "hw/boards.h"
#include "net/net.h"
#include "migration.h"
#include "migration/snapshot.h"
#include "migration-stats.h"
#include "migration/vmstate.h"
#include "migration/misc.h"
#include "migration/register.h"
#include "migration/global_state.h"
#include "migration/channel-block.h"
#include "ram.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-migration.h"
#include "qapi/clone-visitor.h"
#include "qapi/qapi-builtin-visit.h"
#include "qapi/qmp/qerror.h"
#include "qemu/error-report.h"
#include "sysemu/cpus.h"
#include "exec/memory.h"
#include "exec/target_page.h"
#include "trace.h"
#include "qemu/iov.h"
#include "qemu/job.h"
#include "qemu/main-loop.h"
#include "block/snapshot.h"
#include "qemu/cutils.h"
#include "io/channel-buffer.h"
#include "io/channel-file.h"
#include "sysemu/replay.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "sysemu/xen.h"
#include "migration/colo.h"
#include "qemu/bitmap.h"
#include "net/announce.h"
#include "qemu/yank.h"
#include "yank_functions.h"
#include "sysemu/qtest.h"
#include "options.h"

const unsigned int postcopy_ram_discard_version;

/* Subcommands for QEMU_VM_COMMAND */
enum qemu_vm_cmd {
    MIG_CMD_INVALID = 0,       /* Must be 0 */
    MIG_CMD_OPEN_RETURN_PATH,  /* Tell the dest to open the Return path */
    MIG_CMD_PING,              /* Request a PONG on the RP */

    MIG_CMD_POSTCOPY_ADVISE,       /* Prior to any page transfers, just
                                      warn we might want to do PC */
    MIG_CMD_POSTCOPY_LISTEN,       /* Start listening for incoming
                                      pages as it's running. */
    MIG_CMD_POSTCOPY_RUN,          /* Start execution */

    MIG_CMD_POSTCOPY_RAM_DISCARD,  /* A list of pages to discard that
                                      were previously sent during
                                      precopy but are dirty. */
    MIG_CMD_PACKAGED,          /* Send a wrapped stream within this stream */
    MIG_CMD_ENABLE_COLO,       /* Enable COLO */
    MIG_CMD_POSTCOPY_RESUME,   /* resume postcopy on dest */
    MIG_CMD_RECV_BITMAP,       /* Request for recved bitmap on dst */
    MIG_CMD_MAX
};

#define MAX_VM_CMD_PACKAGED_SIZE UINT32_MAX
static struct mig_cmd_args {
    ssize_t     len; /* -1 = variable */
    const char *name;
} mig_cmd_args[] = {
    [MIG_CMD_INVALID]          = { .len = -1, .name = "INVALID" },
    [MIG_CMD_OPEN_RETURN_PATH] = { .len =  0, .name = "OPEN_RETURN_PATH" },
    [MIG_CMD_PING]             = { .len = sizeof(uint32_t), .name = "PING" },
    [MIG_CMD_POSTCOPY_ADVISE]  = { .len = -1, .name = "POSTCOPY_ADVISE" },
    [MIG_CMD_POSTCOPY_LISTEN]  = { .len =  0, .name = "POSTCOPY_LISTEN" },
    [MIG_CMD_POSTCOPY_RUN]     = { .len =  0, .name = "POSTCOPY_RUN" },
    [MIG_CMD_POSTCOPY_RAM_DISCARD] = {
                                   .len = -1, .name = "POSTCOPY_RAM_DISCARD" },
    [MIG_CMD_POSTCOPY_RESUME]  = { .len =  0, .name = "POSTCOPY_RESUME" },
    [MIG_CMD_PACKAGED]         = { .len =  4, .name = "PACKAGED" },
    [MIG_CMD_RECV_BITMAP]      = { .len = -1, .name = "RECV_BITMAP" },
    [MIG_CMD_MAX]              = { .len = -1, .name = "MAX" },
};
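
/*
 * On the wire each command is framed as (see qemu_savevm_command_send()
 * below):
 *      byte    QEMU_VM_COMMAND
 *      be16    command number (enum qemu_vm_cmd)
 *      be16    length of the associated data
 *  n x byte    command-specific data
 * The .len values above give the expected data length; -1 means variable,
 * validated by the individual command handler.
 */
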
/* Note for MIG_CMD_POSTCOPY_ADVISE:
 * The format of the arguments depends on the postcopy mode:
 * - postcopy RAM only
 *   uint64_t host page size
 *   uint64_t target page size
 *
 * - postcopy RAM and postcopy dirty bitmaps
 *   format is the same as for postcopy RAM only
 *
 * - postcopy dirty bitmaps only
 *   Nothing. Command length field is 0.
 *
 * Be careful: adding a new postcopy entity with some other parameters should
 * not break format self-description ability. A good way is to introduce some
 * generic extendable format with an exception for the two old entities.
 */

/***********************************************************/
/* savevm/loadvm support */

static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int is_writable)
{
    if (is_writable) {
        return qemu_file_new_output(QIO_CHANNEL(qio_channel_block_new(bs)));
    } else {
        return qemu_file_new_input(QIO_CHANNEL(qio_channel_block_new(bs)));
    }
}

/* QEMUFile timer support.
 * Not in qemu-file.c to not add qemu-timer.c as dependency to qemu-file.c
 */

void timer_put(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    expire_time = timer_expire_time_ns(ts);
    qemu_put_be64(f, expire_time);
}

void timer_get(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    expire_time = qemu_get_be64(f);
    if (expire_time != -1) {
        timer_mod_ns(ts, expire_time);
    } else {
        timer_del(ts);
    }
}


/* VMState timer support.
 * Not in vmstate.c to not add qemu-timer.c as dependency to vmstate.c
 */

static int get_timer(QEMUFile *f, void *pv, size_t size,
                     const VMStateField *field)
{
    QEMUTimer *v = pv;
    timer_get(f, v);
    return 0;
}

static int put_timer(QEMUFile *f, void *pv, size_t size,
                     const VMStateField *field, JSONWriter *vmdesc)
{
    QEMUTimer *v = pv;
    timer_put(f, v);

    return 0;
}

const VMStateInfo vmstate_info_timer = {
    .name = "timer",
    .get  = get_timer,
    .put  = put_timer,
};
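
/*
 * Note: devices don't normally name vmstate_info_timer directly; it is
 * typically referenced via the VMSTATE_TIMER()/VMSTATE_TIMER_PTR() field
 * macros in the vmstate headers (an observation about typical callers,
 * not something enforced here).
 */
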
typedef struct CompatEntry {
    char idstr[256];
    int instance_id;
} CompatEntry;

typedef struct SaveStateEntry {
    QTAILQ_ENTRY(SaveStateEntry) entry;
    char idstr[256];
    uint32_t instance_id;
    int alias_id;
    int version_id;
    /* version id read from the stream */
    int load_version_id;
    int section_id;
    /* section id read from the stream */
    int load_section_id;
    const SaveVMHandlers *ops;
    const VMStateDescription *vmsd;
    void *opaque;
    CompatEntry *compat;
    int is_ram;
} SaveStateEntry;

typedef struct SaveState {
    QTAILQ_HEAD(, SaveStateEntry) handlers;
    SaveStateEntry *handler_pri_head[MIG_PRI_MAX + 1];
    int global_section_id;
    uint32_t len;
    const char *name;
    uint32_t target_page_bits;
    uint32_t caps_count;
    MigrationCapability *capabilities;
    QemuUUID uuid;
} SaveState;
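
/*
 * savevm_state.handlers is kept sorted by decreasing MigrationPriority;
 * handler_pri_head[] caches the first entry of each priority band so that
 * savevm_state_handler_insert() below can find its insertion point by
 * scanning priorities rather than walking the whole handler list.
 */
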
static SaveState savevm_state = {
    .handlers = QTAILQ_HEAD_INITIALIZER(savevm_state.handlers),
    .handler_pri_head = { [MIG_PRI_DEFAULT ... MIG_PRI_MAX] = NULL },
    .global_section_id = 0,
};

static bool should_validate_capability(int capability)
{
    assert(capability >= 0 && capability < MIGRATION_CAPABILITY__MAX);
    /* Validate only new capabilities to keep compatibility. */
    switch (capability) {
    case MIGRATION_CAPABILITY_X_IGNORE_SHARED:
        return true;
    default:
        return false;
    }
}

static uint32_t get_validatable_capabilities_count(void)
{
    MigrationState *s = migrate_get_current();
    uint32_t result = 0;
    int i;
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (should_validate_capability(i) && s->capabilities[i]) {
            result++;
        }
    }
    return result;
}

static int configuration_pre_save(void *opaque)
{
    SaveState *state = opaque;
    const char *current_name = MACHINE_GET_CLASS(current_machine)->name;
    MigrationState *s = migrate_get_current();
    int i, j;

    state->len = strlen(current_name);
    state->name = current_name;
    state->target_page_bits = qemu_target_page_bits();

    state->caps_count = get_validatable_capabilities_count();
    state->capabilities = g_renew(MigrationCapability, state->capabilities,
                                  state->caps_count);
    for (i = j = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (should_validate_capability(i) && s->capabilities[i]) {
            state->capabilities[j++] = i;
        }
    }
    state->uuid = qemu_uuid;

    return 0;
}

static int configuration_post_save(void *opaque)
{
    SaveState *state = opaque;

    g_free(state->capabilities);
    state->capabilities = NULL;
    state->caps_count = 0;
    return 0;
}

static int configuration_pre_load(void *opaque)
{
    SaveState *state = opaque;

    /* If there is no target-page-bits subsection it means the source
     * predates the variable-target-page-bits support and is using the
     * minimum possible value for this CPU.
     */
    state->target_page_bits = qemu_target_page_bits_min();
    return 0;
}

static bool configuration_validate_capabilities(SaveState *state)
{
    bool ret = true;
    MigrationState *s = migrate_get_current();
    unsigned long *source_caps_bm;
    int i;

    source_caps_bm = bitmap_new(MIGRATION_CAPABILITY__MAX);
    for (i = 0; i < state->caps_count; i++) {
        MigrationCapability capability = state->capabilities[i];
        set_bit(capability, source_caps_bm);
    }

    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        bool source_state, target_state;
        if (!should_validate_capability(i)) {
            continue;
        }
        source_state = test_bit(i, source_caps_bm);
        target_state = s->capabilities[i];
        if (source_state != target_state) {
            error_report("Capability %s is %s, but received capability is %s",
                         MigrationCapability_str(i),
                         target_state ? "on" : "off",
                         source_state ? "on" : "off");
            ret = false;
            /* Don't break here to report all failed capabilities */
        }
    }
    g_free(source_caps_bm);
    return ret;
}

static int configuration_post_load(void *opaque, int version_id)
{
    SaveState *state = opaque;
    const char *current_name = MACHINE_GET_CLASS(current_machine)->name;
    int ret = 0;

    if (strncmp(state->name, current_name, state->len) != 0) {
        error_report("Machine type received is '%.*s' and local is '%s'",
                     (int) state->len, state->name, current_name);
        ret = -EINVAL;
        goto out;
    }

    if (state->target_page_bits != qemu_target_page_bits()) {
        error_report("Received TARGET_PAGE_BITS is %d but local is %d",
                     state->target_page_bits, qemu_target_page_bits());
        ret = -EINVAL;
        goto out;
    }

    if (!configuration_validate_capabilities(state)) {
        ret = -EINVAL;
        goto out;
    }

out:
    g_free((void *)state->name);
    state->name = NULL;
    state->len = 0;
    g_free(state->capabilities);
    state->capabilities = NULL;
    state->caps_count = 0;

    return ret;
}

static int get_capability(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field)
{
    MigrationCapability *capability = pv;
    char capability_str[UINT8_MAX + 1];
    uint8_t len;
    int i;

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)capability_str, len);
    capability_str[len] = '\0';
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (!strcmp(MigrationCapability_str(i), capability_str)) {
            *capability = i;
            return 0;
        }
    }
    error_report("Received unknown capability %s", capability_str);
    return -EINVAL;
}

static int put_capability(QEMUFile *f, void *pv, size_t size,
                          const VMStateField *field, JSONWriter *vmdesc)
{
    MigrationCapability *capability = pv;
    const char *capability_str = MigrationCapability_str(*capability);
    size_t len = strlen(capability_str);
    assert(len <= UINT8_MAX);

    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)capability_str, len);
    return 0;
}
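
/*
 * So each capability travels as a one-byte length followed by its QAPI
 * name (no NUL terminator); get_capability() resolves the name back to
 * the local enum value, which keeps the stream independent of the numeric
 * enum values on either side.
 */
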
static const VMStateInfo vmstate_info_capability = {
    .name = "capability",
    .get  = get_capability,
    .put  = put_capability,
};

/* The target-page-bits subsection is present only if the
 * target page size is not the same as the default (ie the
 * minimum page size for a variable-page-size guest CPU).
 * If it is present then it contains the actual target page
 * bits for the machine, and migration will fail if the
 * two ends don't agree about it.
 */
static bool vmstate_target_page_bits_needed(void *opaque)
{
    return qemu_target_page_bits()
        > qemu_target_page_bits_min();
}

static const VMStateDescription vmstate_target_page_bits = {
    .name = "configuration/target-page-bits",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmstate_target_page_bits_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(target_page_bits, SaveState),
        VMSTATE_END_OF_LIST()
    }
};

static bool vmstate_capabilites_needed(void *opaque)
{
    return get_validatable_capabilities_count() > 0;
}

static const VMStateDescription vmstate_capabilites = {
    .name = "configuration/capabilities",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmstate_capabilites_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_V(caps_count, SaveState, 1),
        VMSTATE_VARRAY_UINT32_ALLOC(capabilities, SaveState, caps_count, 1,
                                    vmstate_info_capability,
                                    MigrationCapability),
        VMSTATE_END_OF_LIST()
    }
};

static bool vmstate_uuid_needed(void *opaque)
{
    return qemu_uuid_set && migrate_validate_uuid();
}

static int vmstate_uuid_post_load(void *opaque, int version_id)
{
    SaveState *state = opaque;
    char uuid_src[UUID_FMT_LEN + 1];
    char uuid_dst[UUID_FMT_LEN + 1];

    if (!qemu_uuid_set) {
        /*
         * It's a warning because the user might not know the UUID in some
         * cases, e.g. when loading an old snapshot.
         */
        qemu_uuid_unparse(&state->uuid, uuid_src);
        warn_report("UUID is received %s, but local uuid isn't set",
                     uuid_src);
        return 0;
    }
    if (!qemu_uuid_is_equal(&state->uuid, &qemu_uuid)) {
        qemu_uuid_unparse(&state->uuid, uuid_src);
        qemu_uuid_unparse(&qemu_uuid, uuid_dst);
        error_report("UUID received is %s and local is %s", uuid_src, uuid_dst);
        return -EINVAL;
    }
    return 0;
}

static const VMStateDescription vmstate_uuid = {
    .name = "configuration/uuid",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = vmstate_uuid_needed,
    .post_load = vmstate_uuid_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY_V(uuid.data, SaveState, sizeof(QemuUUID), 1),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_configuration = {
    .name = "configuration",
    .version_id = 1,
    .pre_load = configuration_pre_load,
    .post_load = configuration_post_load,
    .pre_save = configuration_pre_save,
    .post_save = configuration_post_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(len, SaveState),
        VMSTATE_VBUFFER_ALLOC_UINT32(name, SaveState, 0, NULL, len),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription *[]) {
        &vmstate_target_page_bits,
        &vmstate_capabilites,
        &vmstate_uuid,
        NULL
    }
};
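
/*
 * The "configuration" section is written immediately after the stream
 * header (see qemu_savevm_state_header() below) when send_configuration
 * is set; its load hooks are what reject a mismatched machine type,
 * target page size, validated capability or UUID before any device state
 * is parsed.
 */
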
static void dump_vmstate_vmsd(FILE *out_file,
                              const VMStateDescription *vmsd, int indent,
                              bool is_subsection);

static void dump_vmstate_vmsf(FILE *out_file, const VMStateField *field,
                              int indent)
{
    fprintf(out_file, "%*s{\n", indent, "");
    indent += 2;
    fprintf(out_file, "%*s\"field\": \"%s\",\n", indent, "", field->name);
    fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "",
            field->version_id);
    fprintf(out_file, "%*s\"field_exists\": %s,\n", indent, "",
            field->field_exists ? "true" : "false");
    if (field->flags & VMS_ARRAY) {
        fprintf(out_file, "%*s\"num\": %d,\n", indent, "", field->num);
    }
    fprintf(out_file, "%*s\"size\": %zu", indent, "", field->size);
    if (field->vmsd != NULL) {
        fprintf(out_file, ",\n");
        dump_vmstate_vmsd(out_file, field->vmsd, indent, false);
    }
    fprintf(out_file, "\n%*s}", indent - 2, "");
}

static void dump_vmstate_vmss(FILE *out_file,
                              const VMStateDescription **subsection,
                              int indent)
{
    if (*subsection != NULL) {
        dump_vmstate_vmsd(out_file, *subsection, indent, true);
    }
}

static void dump_vmstate_vmsd(FILE *out_file,
                              const VMStateDescription *vmsd, int indent,
                              bool is_subsection)
{
    if (is_subsection) {
        fprintf(out_file, "%*s{\n", indent, "");
    } else {
        fprintf(out_file, "%*s\"%s\": {\n", indent, "", "Description");
    }
    indent += 2;
    fprintf(out_file, "%*s\"name\": \"%s\",\n", indent, "", vmsd->name);
    fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "",
            vmsd->version_id);
    fprintf(out_file, "%*s\"minimum_version_id\": %d", indent, "",
            vmsd->minimum_version_id);
    if (vmsd->fields != NULL) {
        const VMStateField *field = vmsd->fields;
        bool first;

        fprintf(out_file, ",\n%*s\"Fields\": [\n", indent, "");
        first = true;
        while (field->name != NULL) {
            if (field->flags & VMS_MUST_EXIST) {
                /* Ignore VMSTATE_VALIDATE bits; these don't get migrated */
                field++;
                continue;
            }
            if (!first) {
                fprintf(out_file, ",\n");
            }
            dump_vmstate_vmsf(out_file, field, indent + 2);
            field++;
            first = false;
        }
        assert(field->flags == VMS_END);
        fprintf(out_file, "\n%*s]", indent, "");
    }
    if (vmsd->subsections != NULL) {
        const VMStateDescription **subsection = vmsd->subsections;
        bool first;

        fprintf(out_file, ",\n%*s\"Subsections\": [\n", indent, "");
        first = true;
        while (*subsection != NULL) {
            if (!first) {
                fprintf(out_file, ",\n");
            }
            dump_vmstate_vmss(out_file, subsection, indent + 2);
            subsection++;
            first = false;
        }
        fprintf(out_file, "\n%*s]", indent, "");
    }
    fprintf(out_file, "\n%*s}", indent - 2, "");
}

static void dump_machine_type(FILE *out_file)
{
    MachineClass *mc;

    mc = MACHINE_GET_CLASS(current_machine);

    fprintf(out_file, "  \"vmschkmachine\": {\n");
    fprintf(out_file, "    \"Name\": \"%s\"\n", mc->name);
    fprintf(out_file, "  },\n");
}

void dump_vmstate_json_to_file(FILE *out_file)
{
    GSList *list, *elt;
    bool first;

    fprintf(out_file, "{\n");
    dump_machine_type(out_file);

    first = true;
    list = object_class_get_list(TYPE_DEVICE, true);
    for (elt = list; elt; elt = elt->next) {
        DeviceClass *dc = OBJECT_CLASS_CHECK(DeviceClass, elt->data,
                                             TYPE_DEVICE);
        const char *name;
        int indent = 2;

        if (!dc->vmsd) {
            continue;
        }

        if (!first) {
            fprintf(out_file, ",\n");
        }
        name = object_class_get_name(OBJECT_CLASS(dc));
        fprintf(out_file, "%*s\"%s\": {\n", indent, "", name);
        indent += 2;
        fprintf(out_file, "%*s\"Name\": \"%s\",\n", indent, "", name);
        fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "",
                dc->vmsd->version_id);
        fprintf(out_file, "%*s\"minimum_version_id\": %d,\n", indent, "",
                dc->vmsd->minimum_version_id);

        dump_vmstate_vmsd(out_file, dc->vmsd, indent, false);

        fprintf(out_file, "\n%*s}", indent - 2, "");
        first = false;
    }
    fprintf(out_file, "\n}\n");
    fclose(out_file);
    g_slist_free(list);
}

static uint32_t calculate_new_instance_id(const char *idstr)
{
    SaveStateEntry *se;
    uint32_t instance_id = 0;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (strcmp(idstr, se->idstr) == 0
            && instance_id <= se->instance_id) {
            instance_id = se->instance_id + 1;
        }
    }
    /* Make sure we never loop over without being noticed */
    assert(instance_id != VMSTATE_INSTANCE_ID_ANY);
    return instance_id;
}
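
/*
 * Example: with two "ram" entries already registered as instance ids 0
 * and 1, calculate_new_instance_id("ram") returns 2; an id is never
 * reused while an entry holding it remains registered.
 */
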
static int calculate_compat_instance_id(const char *idstr)
{
    SaveStateEntry *se;
    int instance_id = 0;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->compat) {
            continue;
        }

        if (strcmp(idstr, se->compat->idstr) == 0
            && instance_id <= se->compat->instance_id) {
            instance_id = se->compat->instance_id + 1;
        }
    }
    return instance_id;
}

static inline MigrationPriority save_state_priority(SaveStateEntry *se)
{
    if (se->vmsd) {
        return se->vmsd->priority;
    }
    return MIG_PRI_DEFAULT;
}

static void savevm_state_handler_insert(SaveStateEntry *nse)
{
    MigrationPriority priority = save_state_priority(nse);
    SaveStateEntry *se;
    int i;

    assert(priority <= MIG_PRI_MAX);

    for (i = priority - 1; i >= 0; i--) {
        se = savevm_state.handler_pri_head[i];
        if (se != NULL) {
            assert(save_state_priority(se) < priority);
            break;
        }
    }

    if (i >= 0) {
        QTAILQ_INSERT_BEFORE(se, nse, entry);
    } else {
        QTAILQ_INSERT_TAIL(&savevm_state.handlers, nse, entry);
    }

    if (savevm_state.handler_pri_head[priority] == NULL) {
        savevm_state.handler_pri_head[priority] = nse;
    }
}

static void savevm_state_handler_remove(SaveStateEntry *se)
{
    SaveStateEntry *next;
    MigrationPriority priority = save_state_priority(se);

    if (se == savevm_state.handler_pri_head[priority]) {
        next = QTAILQ_NEXT(se, entry);
        if (next != NULL && save_state_priority(next) == priority) {
            savevm_state.handler_pri_head[priority] = next;
        } else {
            savevm_state.handler_pri_head[priority] = NULL;
        }
    }
    QTAILQ_REMOVE(&savevm_state.handlers, se, entry);
}
/* TODO: Individual devices generally have very little idea about the rest
   of the system, so instance_id should be removed/replaced.
   Meanwhile pass -1 as instance_id if you do not already have a clearly
   distinguishing id for all instances of your device class. */
int register_savevm_live(const char *idstr,
                         uint32_t instance_id,
                         int version_id,
                         const SaveVMHandlers *ops,
                         void *opaque)
{
    SaveStateEntry *se;

    se = g_new0(SaveStateEntry, 1);
    se->version_id = version_id;
    se->section_id = savevm_state.global_section_id++;
    se->ops = ops;
    se->opaque = opaque;
    se->vmsd = NULL;
    /* if this is a live_savevm then set is_ram */
    if (ops->save_setup != NULL) {
        se->is_ram = 1;
    }

    pstrcat(se->idstr, sizeof(se->idstr), idstr);

    if (instance_id == VMSTATE_INSTANCE_ID_ANY) {
        se->instance_id = calculate_new_instance_id(se->idstr);
    } else {
        se->instance_id = instance_id;
    }
    assert(!se->compat || se->instance_id == 0);
    savevm_state_handler_insert(se);
    return 0;
}
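
/*
 * Typical registration, sketched (handler names are illustrative; the
 * fields shown are real SaveVMHandlers members used elsewhere in this
 * file):
 *
 *     static SaveVMHandlers savevm_foo_handlers = {
 *         .save_setup                 = foo_save_setup,
 *         .save_live_iterate          = foo_save_live_iterate,
 *         .save_live_complete_precopy = foo_save_complete,
 *         .load_state                 = foo_load_state,
 *     };
 *
 *     register_savevm_live("foo", VMSTATE_INSTANCE_ID_ANY, 1,
 *                          &savevm_foo_handlers, foo_state);
 */
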
void unregister_savevm(VMStateIf *obj, const char *idstr, void *opaque)
{
    SaveStateEntry *se, *new_se;
    char id[256] = "";

    if (obj) {
        char *oid = vmstate_if_get_id(obj);
        if (oid) {
            pstrcpy(id, sizeof(id), oid);
            pstrcat(id, sizeof(id), "/");
            g_free(oid);
        }
    }
    pstrcat(id, sizeof(id), idstr);

    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
        if (strcmp(se->idstr, id) == 0 && se->opaque == opaque) {
            savevm_state_handler_remove(se);
            g_free(se->compat);
            g_free(se);
        }
    }
}

/*
 * Perform some basic checks on vmsd's at registration
 * time.
 */
static void vmstate_check(const VMStateDescription *vmsd)
{
    const VMStateField *field = vmsd->fields;
    const VMStateDescription **subsection = vmsd->subsections;

    if (field) {
        while (field->name) {
            if (field->flags & (VMS_STRUCT | VMS_VSTRUCT)) {
                /* Recurse to sub structures */
                vmstate_check(field->vmsd);
            }
            /* Carry on */
            field++;
        }
        /* Check for the end of field list canary */
        if (field->flags != VMS_END) {
            error_report("VMSTATE not ending with VMS_END: %s", vmsd->name);
            g_assert_not_reached();
        }
    }

    while (subsection && *subsection) {
        /*
         * The name of a subsection should start with the name of the
         * current object.
         */
        assert(!strncmp(vmsd->name, (*subsection)->name, strlen(vmsd->name)));
        vmstate_check(*subsection);
        subsection++;
    }
}

int vmstate_register_with_alias_id(VMStateIf *obj, uint32_t instance_id,
                                   const VMStateDescription *vmsd,
                                   void *opaque, int alias_id,
                                   int required_for_version,
                                   Error **errp)
{
    SaveStateEntry *se;

    /* If this triggers, alias support can be dropped for the vmsd. */
    assert(alias_id == -1 || required_for_version >= vmsd->minimum_version_id);

    se = g_new0(SaveStateEntry, 1);
    se->version_id = vmsd->version_id;
    se->section_id = savevm_state.global_section_id++;
    se->opaque = opaque;
    se->vmsd = vmsd;
    se->alias_id = alias_id;

    if (obj) {
        char *id = vmstate_if_get_id(obj);
        if (id) {
            if (snprintf(se->idstr, sizeof(se->idstr), "%s/", id) >=
                sizeof(se->idstr)) {
                error_setg(errp, "Path too long for VMState (%s)", id);
                g_free(id);
                g_free(se);

                return -1;
            }
            g_free(id);

            se->compat = g_new0(CompatEntry, 1);
            pstrcpy(se->compat->idstr, sizeof(se->compat->idstr), vmsd->name);
            se->compat->instance_id = instance_id == VMSTATE_INSTANCE_ID_ANY ?
                calculate_compat_instance_id(vmsd->name) : instance_id;
            instance_id = VMSTATE_INSTANCE_ID_ANY;
        }
    }
    pstrcat(se->idstr, sizeof(se->idstr), vmsd->name);

    if (instance_id == VMSTATE_INSTANCE_ID_ANY) {
        se->instance_id = calculate_new_instance_id(se->idstr);
    } else {
        se->instance_id = instance_id;
    }

    /* Perform a recursive sanity check during the test runs */
    if (qtest_enabled()) {
        vmstate_check(vmsd);
    }
    assert(!se->compat || se->instance_id == 0);
    savevm_state_handler_insert(se);
    return 0;
}

void vmstate_unregister(VMStateIf *obj, const VMStateDescription *vmsd,
                        void *opaque)
{
    SaveStateEntry *se, *new_se;

    QTAILQ_FOREACH_SAFE(se, &savevm_state.handlers, entry, new_se) {
        if (se->vmsd == vmsd && se->opaque == opaque) {
            savevm_state_handler_remove(se);
            g_free(se->compat);
            g_free(se);
        }
    }
}

static int vmstate_load(QEMUFile *f, SaveStateEntry *se)
{
    trace_vmstate_load(se->idstr, se->vmsd ? se->vmsd->name : "(old)");
    if (!se->vmsd) {         /* Old style */
        return se->ops->load_state(f, se->opaque, se->load_version_id);
    }
    return vmstate_load_state(f, se->vmsd, se->opaque, se->load_version_id);
}

static void vmstate_save_old_style(QEMUFile *f, SaveStateEntry *se,
                                   JSONWriter *vmdesc)
{
    uint64_t old_offset = qemu_file_transferred_noflush(f);
    se->ops->save_state(f, se->opaque);
    uint64_t size = qemu_file_transferred_noflush(f) - old_offset;

    if (vmdesc) {
        json_writer_int64(vmdesc, "size", size);
        json_writer_start_array(vmdesc, "fields");
        json_writer_start_object(vmdesc, NULL);
        json_writer_str(vmdesc, "name", "data");
        json_writer_int64(vmdesc, "size", size);
        json_writer_str(vmdesc, "type", "buffer");
        json_writer_end_object(vmdesc);
        json_writer_end_array(vmdesc);
    }
}

/*
 * Write the header for device section (QEMU_VM_SECTION START/END/PART/FULL)
 */
static void save_section_header(QEMUFile *f, SaveStateEntry *se,
                                uint8_t section_type)
{
    qemu_put_byte(f, section_type);
    qemu_put_be32(f, se->section_id);

    if (section_type == QEMU_VM_SECTION_FULL ||
        section_type == QEMU_VM_SECTION_START) {
        /* ID string */
        size_t len = strlen(se->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)se->idstr, len);

        qemu_put_be32(f, se->instance_id);
        qemu_put_be32(f, se->version_id);
    }
}
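
/*
 * So a FULL/START section header looks like this on the wire:
 *      byte    section type (QEMU_VM_SECTION_FULL or _START)
 *      be32    section id
 *      byte    idstr length
 *  n x byte    idstr
 *      be32    instance id
 *      be32    version id
 * PART/END headers carry only the type byte and the section id.
 */
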
/*
 * Write a footer onto device sections that catches cases of misformatted
 * device sections.
 */
static void save_section_footer(QEMUFile *f, SaveStateEntry *se)
{
    if (migrate_get_current()->send_section_footer) {
        qemu_put_byte(f, QEMU_VM_SECTION_FOOTER);
        qemu_put_be32(f, se->section_id);
    }
}

static int vmstate_save(QEMUFile *f, SaveStateEntry *se, JSONWriter *vmdesc)
{
    int ret;

    if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
        return 0;
    }
    if (se->vmsd && !vmstate_save_needed(se->vmsd, se->opaque)) {
        trace_savevm_section_skip(se->idstr, se->section_id);
        return 0;
    }

    trace_savevm_section_start(se->idstr, se->section_id);
    save_section_header(f, se, QEMU_VM_SECTION_FULL);
    if (vmdesc) {
        json_writer_start_object(vmdesc, NULL);
        json_writer_str(vmdesc, "name", se->idstr);
        json_writer_int64(vmdesc, "instance_id", se->instance_id);
    }

    trace_vmstate_save(se->idstr, se->vmsd ? se->vmsd->name : "(old)");
    if (!se->vmsd) {
        vmstate_save_old_style(f, se, vmdesc);
    } else {
        ret = vmstate_save_state(f, se->vmsd, se->opaque, vmdesc);
        if (ret) {
            return ret;
        }
    }

    trace_savevm_section_end(se->idstr, se->section_id, 0);
    save_section_footer(f, se);
    if (vmdesc) {
        json_writer_end_object(vmdesc);
    }
    return 0;
}

/**
 * qemu_savevm_command_send: Send a 'QEMU_VM_COMMAND' type element with the
 *                           command and associated data.
 *
 * @f: File to send command on
 * @command: Command type to send
 * @len: Length of associated data
 * @data: Data associated with command.
 */
static void qemu_savevm_command_send(QEMUFile *f,
                                     enum qemu_vm_cmd command,
                                     uint16_t len,
                                     uint8_t *data)
{
    trace_savevm_command_send(command, len);
    qemu_put_byte(f, QEMU_VM_COMMAND);
    qemu_put_be16(f, (uint16_t)command);
    qemu_put_be16(f, len);
    qemu_put_buffer(f, data, len);
    qemu_fflush(f);
}

void qemu_savevm_send_colo_enable(QEMUFile *f)
{
    trace_savevm_send_colo_enable();
    qemu_savevm_command_send(f, MIG_CMD_ENABLE_COLO, 0, NULL);
}

void qemu_savevm_send_ping(QEMUFile *f, uint32_t value)
{
    uint32_t buf;

    trace_savevm_send_ping(value);
    buf = cpu_to_be32(value);
    qemu_savevm_command_send(f, MIG_CMD_PING, sizeof(value), (uint8_t *)&buf);
}

void qemu_savevm_send_open_return_path(QEMUFile *f)
{
    trace_savevm_send_open_return_path();
    qemu_savevm_command_send(f, MIG_CMD_OPEN_RETURN_PATH, 0, NULL);
}

/* We have a buffer of data to send; we don't want that all to be loaded
 * by the command itself, so the command contains just the length of the
 * extra buffer that we then send straight after it.
 * TODO: Must be a better way to organise that
 *
 * Returns:
 *    0 on success
 *    -ve on error
 */
int qemu_savevm_send_packaged(QEMUFile *f, const uint8_t *buf, size_t len)
{
    uint32_t tmp;

    if (len > MAX_VM_CMD_PACKAGED_SIZE) {
        error_report("%s: Unreasonably large packaged state: %zu",
                     __func__, len);
        return -1;
    }

    tmp = cpu_to_be32(len);

    trace_qemu_savevm_send_packaged();
    qemu_savevm_command_send(f, MIG_CMD_PACKAGED, 4, (uint8_t *)&tmp);

    qemu_put_buffer(f, buf, len);

    return 0;
}
/* Send prior to any postcopy transfer */
void qemu_savevm_send_postcopy_advise(QEMUFile *f)
{
    if (migrate_postcopy_ram()) {
        uint64_t tmp[2];
        tmp[0] = cpu_to_be64(ram_pagesize_summary());
        tmp[1] = cpu_to_be64(qemu_target_page_size());

        trace_qemu_savevm_send_postcopy_advise();
        qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE,
                                 16, (uint8_t *)tmp);
    } else {
        qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 0, NULL);
    }
}

/* Sent prior to starting the destination running in postcopy, discard pages
 * that have already been sent but redirtied on the source.
 * CMD_POSTCOPY_RAM_DISCARD consists of:
 *      byte   version (0)
 *      byte   Length of name field (not including 0)
 *  n x byte   RAM block name
 *      byte   0 terminator (just for safety)
 *  n x        Byte ranges within the named RAMBlock
 *      be64   Start of the range
 *      be64   Length
 *
 *  name:  RAMBlock name that these entries are part of
 *  len: Number of page entries
 *  start_list: 'len' addresses
 *  length_list: 'len' addresses
 */
void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f, const char *name,
                                           uint16_t len,
                                           uint64_t *start_list,
                                           uint64_t *length_list)
{
    uint8_t *buf;
    uint16_t tmplen;
    uint16_t t;
    size_t name_len = strlen(name);

    trace_qemu_savevm_send_postcopy_ram_discard(name, len);
    assert(name_len < 256);
    buf = g_malloc0(1 + 1 + name_len + 1 + (8 + 8) * len);
    buf[0] = postcopy_ram_discard_version;
    buf[1] = name_len;
    memcpy(buf + 2, name, name_len);
    tmplen = 2 + name_len;
    buf[tmplen++] = '\0';

    for (t = 0; t < len; t++) {
        stq_be_p(buf + tmplen, start_list[t]);
        tmplen += 8;
        stq_be_p(buf + tmplen, length_list[t]);
        tmplen += 8;
    }
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RAM_DISCARD, tmplen, buf);
    g_free(buf);
}

/* Get the destination into a state where it can receive postcopy data. */
void qemu_savevm_send_postcopy_listen(QEMUFile *f)
{
    trace_savevm_send_postcopy_listen();
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_LISTEN, 0, NULL);
}

/* Kick the destination into running */
void qemu_savevm_send_postcopy_run(QEMUFile *f)
{
    trace_savevm_send_postcopy_run();
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RUN, 0, NULL);
}

void qemu_savevm_send_postcopy_resume(QEMUFile *f)
{
    trace_savevm_send_postcopy_resume();
    qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_RESUME, 0, NULL);
}

void qemu_savevm_send_recv_bitmap(QEMUFile *f, char *block_name)
{
    size_t len;
    char buf[256];

    trace_savevm_send_recv_bitmap(block_name);

    buf[0] = len = strlen(block_name);
    memcpy(buf + 1, block_name, len);

    qemu_savevm_command_send(f, MIG_CMD_RECV_BITMAP, len + 1, (uint8_t *)buf);
}

bool qemu_savevm_state_blocked(Error **errp)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->unmigratable) {
            error_setg(errp, "State blocked by non-migratable device '%s'",
                       se->idstr);
            return true;
        }
    }
    return false;
}

void qemu_savevm_non_migratable_list(strList **reasons)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->unmigratable) {
            QAPI_LIST_PREPEND(*reasons,
                              g_strdup_printf("non-migratable device: %s",
                                              se->idstr));
        }
    }
}

void qemu_savevm_state_header(QEMUFile *f)
{
    trace_savevm_state_header();
    qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
    qemu_put_be32(f, QEMU_VM_FILE_VERSION);

    if (migrate_get_current()->send_configuration) {
        qemu_put_byte(f, QEMU_VM_CONFIGURATION);
        vmstate_save_state(f, &vmstate_configuration, &savevm_state, 0);
    }
}
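
/*
 * A migration stream therefore begins:
 *      be32    QEMU_VM_FILE_MAGIC
 *      be32    QEMU_VM_FILE_VERSION
 *      optionally: byte QEMU_VM_CONFIGURATION + the configuration vmstate
 * followed by device sections until QEMU_VM_EOF.
 */
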
bool qemu_savevm_state_guest_unplug_pending(void)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->dev_unplug_pending &&
            se->vmsd->dev_unplug_pending(se->opaque)) {
            return true;
        }
    }

    return false;
}

int qemu_savevm_state_prepare(Error **errp)
{
    SaveStateEntry *se;
    int ret;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->save_prepare) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }

        ret = se->ops->save_prepare(se->opaque, errp);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

void qemu_savevm_state_setup(QEMUFile *f)
{
    MigrationState *ms = migrate_get_current();
    SaveStateEntry *se;
    Error *local_err = NULL;
    int ret;

    ms->vmdesc = json_writer_new(false);
    json_writer_start_object(ms->vmdesc, NULL);
    json_writer_int64(ms->vmdesc, "page_size", qemu_target_page_size());
    json_writer_start_array(ms->vmdesc, "devices");

    trace_savevm_state_setup();
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->early_setup) {
            ret = vmstate_save(f, se, ms->vmdesc);
            if (ret) {
                qemu_file_set_error(f, ret);
                break;
            }
            continue;
        }

        if (!se->ops || !se->ops->save_setup) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        save_section_header(f, se, QEMU_VM_SECTION_START);

        ret = se->ops->save_setup(f, se->opaque);
        save_section_footer(f, se);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            break;
        }
    }

    if (precopy_notify(PRECOPY_NOTIFY_SETUP, &local_err)) {
        error_report_err(local_err);
    }
}

int qemu_savevm_state_resume_prepare(MigrationState *s)
{
    SaveStateEntry *se;
    int ret;

    trace_savevm_state_resume_prepare();

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->resume_prepare) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        ret = se->ops->resume_prepare(s, se->opaque);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

/*
 * This function has three return values:
 *   negative: there was an error, and we have -errno.
 *   0 : We haven't finished, the caller has to go again
 *   1 : We have finished, we can go to the complete phase
 */
int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy)
{
    SaveStateEntry *se;
    int ret = 1;

    trace_savevm_state_iterate();
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->save_live_iterate) {
            continue;
        }
        if (se->ops->is_active &&
            !se->ops->is_active(se->opaque)) {
            continue;
        }
        if (se->ops->is_active_iterate &&
            !se->ops->is_active_iterate(se->opaque)) {
            continue;
        }
        /*
         * In the postcopy phase, any device that doesn't know how to
         * do postcopy should have saved its state in the _complete
         * call that's already run; it might get confused if we call
         * iterate afterwards.
         */
        if (postcopy &&
            !(se->ops->has_postcopy && se->ops->has_postcopy(se->opaque))) {
            continue;
        }
        if (migration_rate_exceeded(f)) {
            return 0;
        }
        trace_savevm_section_start(se->idstr, se->section_id);

        save_section_header(f, se, QEMU_VM_SECTION_PART);

        ret = se->ops->save_live_iterate(f, se->opaque);
        trace_savevm_section_end(se->idstr, se->section_id, ret);
        save_section_footer(f, se);

        if (ret < 0) {
            error_report("failed to save SaveStateEntry with id(name): "
                         "%d(%s): %d",
                         se->section_id, se->idstr, ret);
            qemu_file_set_error(f, ret);
        }
        if (ret <= 0) {
            /* Do not proceed to the next vmstate before this one reported
               completion of the current stage. This serializes the migration
               and reduces the probability that a faster changing state is
               synchronized over and over again. */
            break;
        }
    }
    return ret;
}
static bool should_send_vmdesc(void)
{
    MachineState *machine = MACHINE(qdev_get_machine());
    bool in_postcopy = migration_in_postcopy();
    return !machine->suppress_vmdesc && !in_postcopy;
}

/*
 * Calls the save_live_complete_postcopy methods
 * causing the last few pages to be sent immediately and doing any associated
 * cleanup.
 * Note postcopy also calls qemu_savevm_state_complete_precopy to complete
 * all the other devices, but that happens at the point we switch to postcopy.
 */
void qemu_savevm_state_complete_postcopy(QEMUFile *f)
{
    SaveStateEntry *se;
    int ret;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->save_live_complete_postcopy) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        trace_savevm_section_start(se->idstr, se->section_id);
        /* Section type */
        qemu_put_byte(f, QEMU_VM_SECTION_END);
        qemu_put_be32(f, se->section_id);

        ret = se->ops->save_live_complete_postcopy(f, se->opaque);
        trace_savevm_section_end(se->idstr, se->section_id, ret);
        save_section_footer(f, se);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            return;
        }
    }

    qemu_put_byte(f, QEMU_VM_EOF);
    qemu_fflush(f);
}

static
int qemu_savevm_state_complete_precopy_iterable(QEMUFile *f, bool in_postcopy)
{
    SaveStateEntry *se;
    int ret;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops ||
            (in_postcopy && se->ops->has_postcopy &&
             se->ops->has_postcopy(se->opaque)) ||
            !se->ops->save_live_complete_precopy) {
            continue;
        }

        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        trace_savevm_section_start(se->idstr, se->section_id);

        save_section_header(f, se, QEMU_VM_SECTION_END);

        ret = se->ops->save_live_complete_precopy(f, se->opaque);
        trace_savevm_section_end(se->idstr, se->section_id, ret);
        save_section_footer(f, se);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            return -1;
        }
    }

    return 0;
}

int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f,
                                                    bool in_postcopy,
                                                    bool inactivate_disks)
{
    MigrationState *ms = migrate_get_current();
    JSONWriter *vmdesc = ms->vmdesc;
    int vmdesc_len;
    SaveStateEntry *se;
    int ret;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->vmsd && se->vmsd->early_setup) {
            /* Already saved during qemu_savevm_state_setup(). */
            continue;
        }

        ret = vmstate_save(f, se, vmdesc);
        if (ret) {
            qemu_file_set_error(f, ret);
            return ret;
        }
    }

    if (inactivate_disks) {
        /* Inactivate before sending QEMU_VM_EOF so that the
         * bdrv_activate_all() on the other end won't fail. */
        ret = bdrv_inactivate_all();
        if (ret) {
            error_report("%s: bdrv_inactivate_all() failed (%d)",
                         __func__, ret);
            qemu_file_set_error(f, ret);
            return ret;
        }
    }
    if (!in_postcopy) {
        /* Postcopy stream will still be going */
        qemu_put_byte(f, QEMU_VM_EOF);
    }

    json_writer_end_array(vmdesc);
    json_writer_end_object(vmdesc);
    vmdesc_len = strlen(json_writer_get(vmdesc));

    if (should_send_vmdesc()) {
        qemu_put_byte(f, QEMU_VM_VMDESCRIPTION);
        qemu_put_be32(f, vmdesc_len);
        qemu_put_buffer(f, (uint8_t *)json_writer_get(vmdesc), vmdesc_len);
    }

    /* Free it now to detect any inconsistencies. */
    json_writer_free(vmdesc);
    ms->vmdesc = NULL;

    return 0;
}
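
/*
 * The JSON "vmdesc" built above is advisory metadata appended after
 * QEMU_VM_EOF as byte QEMU_VM_VMDESCRIPTION + be32 length + JSON text.
 * The destination doesn't need it to load the stream; offline tools
 * (e.g. scripts/analyze-migration.py in the QEMU tree) use it to decode
 * a saved stream without running QEMU.
 */
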
int qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only,
                                       bool inactivate_disks)
{
    int ret;
    Error *local_err = NULL;
    bool in_postcopy = migration_in_postcopy();

    if (precopy_notify(PRECOPY_NOTIFY_COMPLETE, &local_err)) {
        error_report_err(local_err);
    }

    trace_savevm_state_complete_precopy();

    cpu_synchronize_all_states();

    if (!in_postcopy || iterable_only) {
        ret = qemu_savevm_state_complete_precopy_iterable(f, in_postcopy);
        if (ret) {
            return ret;
        }
    }

    if (iterable_only) {
        goto flush;
    }

    ret = qemu_savevm_state_complete_precopy_non_iterable(f, in_postcopy,
                                                          inactivate_disks);
    if (ret) {
        return ret;
    }

flush:
    qemu_fflush(f);
    return 0;
}

/* Give an estimate of the amount left to be transferred;
 * the result is split into the amount for units that can and
 * for units that can't do postcopy.
 */
void qemu_savevm_state_pending_estimate(uint64_t *must_precopy,
                                        uint64_t *can_postcopy)
{
    SaveStateEntry *se;

    *must_precopy = 0;
    *can_postcopy = 0;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->state_pending_estimate) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        se->ops->state_pending_estimate(se->opaque, must_precopy, can_postcopy);
    }
}

void qemu_savevm_state_pending_exact(uint64_t *must_precopy,
                                     uint64_t *can_postcopy)
{
    SaveStateEntry *se;

    *must_precopy = 0;
    *can_postcopy = 0;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!se->ops || !se->ops->state_pending_exact) {
            continue;
        }
        if (se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        se->ops->state_pending_exact(se->opaque, must_precopy, can_postcopy);
    }
}

void qemu_savevm_state_cleanup(void)
{
    SaveStateEntry *se;
    Error *local_err = NULL;

    if (precopy_notify(PRECOPY_NOTIFY_CLEANUP, &local_err)) {
        error_report_err(local_err);
    }

    trace_savevm_state_cleanup();
    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (se->ops && se->ops->save_cleanup) {
            se->ops->save_cleanup(se->opaque);
        }
    }
}

static int qemu_savevm_state(QEMUFile *f, Error **errp)
{
    int ret;
    MigrationState *ms = migrate_get_current();
    MigrationStatus status;

    if (migration_is_running(ms->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return -EINVAL;
    }

    if (migrate_block()) {
        error_setg(errp, "Block migration and snapshots are incompatible");
        return -EINVAL;
    }

    ret = migrate_init(ms, errp);
    if (ret) {
        return ret;
    }
    ms->to_dst_file = f;

    qemu_mutex_unlock_iothread();
    qemu_savevm_state_header(f);
    qemu_savevm_state_setup(f);
    qemu_mutex_lock_iothread();

    while (qemu_file_get_error(f) == 0) {
        if (qemu_savevm_state_iterate(f, false) > 0) {
            break;
        }
    }

    ret = qemu_file_get_error(f);
    if (ret == 0) {
        qemu_savevm_state_complete_precopy(f, false, false);
        ret = qemu_file_get_error(f);
    }
    qemu_savevm_state_cleanup();
    if (ret != 0) {
        error_setg_errno(errp, -ret, "Error while writing VM state");
    }

    if (ret != 0) {
        status = MIGRATION_STATUS_FAILED;
    } else {
        status = MIGRATION_STATUS_COMPLETED;
    }
    migrate_set_state(&ms->state, MIGRATION_STATUS_SETUP, status);

    /* f is an outer parameter; it should not stay in the global migration
     * state after this function finishes */
    ms->to_dst_file = NULL;

    return ret;
}

void qemu_savevm_live_state(QEMUFile *f)
{
    /* save QEMU_VM_SECTION_END section */
    qemu_savevm_state_complete_precopy(f, true, false);
    qemu_put_byte(f, QEMU_VM_EOF);
}

int qemu_save_device_state(QEMUFile *f)
{
    SaveStateEntry *se;

    if (!migration_in_colo_state()) {
        qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
        qemu_put_be32(f, QEMU_VM_FILE_VERSION);
    }
    cpu_synchronize_all_states();

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        int ret;

        if (se->is_ram) {
            continue;
        }
        ret = vmstate_save(f, se, NULL);
        if (ret) {
            return ret;
        }
    }

    qemu_put_byte(f, QEMU_VM_EOF);

    return qemu_file_get_error(f);
}

static SaveStateEntry *find_se(const char *idstr, uint32_t instance_id)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
        if (!strcmp(se->idstr, idstr) &&
            (instance_id == se->instance_id ||
             instance_id == se->alias_id))
            return se;
        /* Migrating from an older version? */
        if (strstr(se->idstr, idstr) && se->compat) {
            if (!strcmp(se->compat->idstr, idstr) &&
                (instance_id == se->compat->instance_id ||
                 instance_id == se->alias_id))
                return se;
        }
    }
    return NULL;
}
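
/*
 * Lookup order: an exact idstr + instance/alias match wins; otherwise the
 * compat idstr (the bare vmsd name recorded at registration) is tried,
 * which lets streams from older QEMUs that sent unqualified ids still
 * find the right handler.
 */
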
enum LoadVMExitCodes {
    /* Allow a command to quit all layers of nested loadvm loops */
    LOADVM_QUIT     =  1,
};

/* ------ incoming postcopy messages ------ */
/* 'advise' arrives before any transfers just to tell us that a postcopy
 * *might* happen - it might be skipped if precopy transferred everything
 * quickly.
 */
static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis,
                                         uint16_t len)
{
    PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_ADVISE);
    uint64_t remote_pagesize_summary, local_pagesize_summary, remote_tps;
    size_t page_size = qemu_target_page_size();
    Error *local_err = NULL;

    trace_loadvm_postcopy_handle_advise();
    if (ps != POSTCOPY_INCOMING_NONE) {
        error_report("CMD_POSTCOPY_ADVISE in wrong postcopy state (%d)", ps);
        return -1;
    }

    switch (len) {
    case 0:
        if (migrate_postcopy_ram()) {
            error_report("RAM postcopy is enabled but have 0 byte advise");
            return -EINVAL;
        }
        return 0;
    case 8 + 8:
        if (!migrate_postcopy_ram()) {
            error_report("RAM postcopy is disabled but have 16 byte advise");
            return -EINVAL;
        }
        break;
    default:
        error_report("CMD_POSTCOPY_ADVISE invalid length (%d)", len);
        return -EINVAL;
    }

    if (!postcopy_ram_supported_by_host(mis, &local_err)) {
        error_report_err(local_err);
        postcopy_state_set(POSTCOPY_INCOMING_NONE);
        return -1;
    }

    remote_pagesize_summary = qemu_get_be64(mis->from_src_file);
    local_pagesize_summary = ram_pagesize_summary();

    if (remote_pagesize_summary != local_pagesize_summary) {
        /*
         * This detects two potential causes of mismatch:
         *   a) A mismatch in host page sizes
         *      Some combinations of mismatch are probably possible but it gets
         *      a bit more complicated.  In particular we need to place whole
         *      host pages on the dest at once, and we need to ensure that we
         *      handle dirtying to make sure we never end up sending part of
         *      a hostpage on its own.
         *   b) The use of different huge page sizes on source/destination
         *      a more fine grain test is performed during RAM block migration
         *      but this test here causes a nice early clear failure, and
         *      also fails when passed to an older qemu that doesn't
         *      do huge pages.
         */
        error_report("Postcopy needs matching RAM page sizes (s=%" PRIx64
                     " d=%" PRIx64 ")",
                     remote_pagesize_summary, local_pagesize_summary);
        return -1;
    }

    remote_tps = qemu_get_be64(mis->from_src_file);
    if (remote_tps != page_size) {
        /*
         * Again, some differences could be dealt with, but for now keep it
         * simple.
         */
        error_report("Postcopy needs matching target page sizes (s=%d d=%zd)",
                     (int)remote_tps, page_size);
        return -1;
    }

    if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_ADVISE, &local_err)) {
        error_report_err(local_err);
        return -1;
    }

    if (ram_postcopy_incoming_init(mis)) {
        return -1;
    }

    return 0;
}
/* After postcopy we will be told to throw some pages away since they're
 * dirty and will have to be demand fetched. Must happen before CPU is
 * started.
 * There can be 0..many of these messages, each encoding multiple pages.
 */
static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis,
                                              uint16_t len)
{
    int tmp;
    char ramid[256];
    PostcopyState ps = postcopy_state_get();

    trace_loadvm_postcopy_ram_handle_discard();

    switch (ps) {
    case POSTCOPY_INCOMING_ADVISE:
        /* 1st discard */
        tmp = postcopy_ram_prepare_discard(mis);
        if (tmp) {
            return tmp;
        }
        break;

    case POSTCOPY_INCOMING_DISCARD:
        /* Expected state */
        break;

    default:
        error_report("CMD_POSTCOPY_RAM_DISCARD in wrong postcopy state (%d)",
                     ps);
        return -1;
    }
    /* We're expecting a
     *    Version (0)
     *    a RAM ID string (length byte, name, 0 term)
     *    then at least 1 16 byte chunk
     */
    if (len < (1 + 1 + 1 + 1 + 2 * 8)) {
        error_report("CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len);
        return -1;
    }

    tmp = qemu_get_byte(mis->from_src_file);
    if (tmp != postcopy_ram_discard_version) {
        error_report("CMD_POSTCOPY_RAM_DISCARD invalid version (%d)", tmp);
        return -1;
    }

    if (!qemu_get_counted_string(mis->from_src_file, ramid)) {
        error_report("CMD_POSTCOPY_RAM_DISCARD Failed to read RAMBlock ID");
        return -1;
    }
    tmp = qemu_get_byte(mis->from_src_file);
    if (tmp != 0) {
        error_report("CMD_POSTCOPY_RAM_DISCARD missing nil (%d)", tmp);
        return -1;
    }

    len -= 3 + strlen(ramid);
    if (len % 16) {
        error_report("CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len);
        return -1;
    }
    trace_loadvm_postcopy_ram_handle_discard_header(ramid, len);
    while (len) {
        uint64_t start_addr, block_length;
        start_addr = qemu_get_be64(mis->from_src_file);
        block_length = qemu_get_be64(mis->from_src_file);

        len -= 16;
        int ret = ram_discard_range(ramid, start_addr, block_length);
        if (ret) {
            return ret;
        }
    }
    trace_loadvm_postcopy_ram_handle_discard_end();

    return 0;
}
/*
 * Triggered by a postcopy_listen command; this thread takes over reading
 * the input stream, leaving the main thread free to carry on loading the rest
 * of the device state (from RAM).
 * (TODO:This could do with being in a postcopy file - but there again it's
 * just another input loop, not that postcopy specific)
 */
static void *postcopy_ram_listen_thread(void *opaque)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    QEMUFile *f = mis->from_src_file;
    int load_res;
    MigrationState *migr = migrate_get_current();

    object_ref(OBJECT(migr));

    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                                   MIGRATION_STATUS_POSTCOPY_ACTIVE);
    qemu_sem_post(&mis->thread_sync_sem);
    trace_postcopy_ram_listen_thread_start();

    rcu_register_thread();
    /*
     * Because we're a thread and not a coroutine we can't yield
     * in qemu_file, and thus we must be blocking now.
     */
    qemu_file_set_blocking(f, true);
    load_res = qemu_loadvm_state_main(f, mis);

    /*
     * This is tricky, but, mis->from_src_file can change after it
     * returns, when postcopy recovery happened. In the future, we may
     * want a wrapper for the QEMUFile handle.
     */
    f = mis->from_src_file;

    /* And non-blocking again so we don't block in any cleanup */
    qemu_file_set_blocking(f, false);

    trace_postcopy_ram_listen_thread_exit();
    if (load_res < 0) {
        qemu_file_set_error(f, load_res);
        dirty_bitmap_mig_cancel_incoming();
        if (postcopy_state_get() == POSTCOPY_INCOMING_RUNNING &&
            !migrate_postcopy_ram() && migrate_dirty_bitmaps())
        {
            error_report("%s: loadvm failed during postcopy: %d. All states "
                         "are migrated except dirty bitmaps. Some dirty "
                         "bitmaps may be lost, and present migrated dirty "
                         "bitmaps are correctly migrated and valid.",
                         __func__, load_res);
            load_res = 0; /* prevent further exit() */
        } else {
            error_report("%s: loadvm failed: %d", __func__, load_res);
            migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                                           MIGRATION_STATUS_FAILED);
        }
    }
    if (load_res >= 0) {
        /*
         * This looks good, but it's possible that the device loading in the
         * main thread hasn't finished yet, and so we might not be in 'RUN'
         * state yet; wait for the end of the main thread.
         */
        qemu_event_wait(&mis->main_thread_load_event);
    }
    postcopy_ram_incoming_cleanup(mis);

    if (load_res < 0) {
        /*
         * If something went wrong then we have a bad state so exit;
         * depending how far we got it might be possible at this point
         * to leave the guest running and fire MCEs for pages that never
         * arrived as a desperate recovery step.
         */
        rcu_unregister_thread();
        exit(EXIT_FAILURE);
    }

    migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                                   MIGRATION_STATUS_COMPLETED);
    /*
     * If everything has worked fine, then the main thread has waited
     * for us to start, and we're the last use of the mis.
     * (If something broke then qemu will have to exit anyway since it's
     * got a bad migration state).
     */
    migration_incoming_state_destroy();
    qemu_loadvm_state_cleanup();

    rcu_unregister_thread();
    mis->have_listen_thread = false;
    postcopy_state_set(POSTCOPY_INCOMING_END);

    object_unref(OBJECT(migr));

    return NULL;
}
2015 /* After this message we must be able to immediately receive postcopy data */
2016 static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis)
2018 PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_LISTENING);
2019 Error *local_err = NULL;
2021 trace_loadvm_postcopy_handle_listen("enter");
2023 if (ps != POSTCOPY_INCOMING_ADVISE && ps != POSTCOPY_INCOMING_DISCARD) {
2024 error_report("CMD_POSTCOPY_LISTEN in wrong postcopy state (%d)", ps);
2025 return -1;
2027 if (ps == POSTCOPY_INCOMING_ADVISE) {
2029 * A rare case, we entered listen without having to do any discards,
2030 * so do the setup that's normally done at the time of the 1st discard.
2032 if (migrate_postcopy_ram()) {
2033 postcopy_ram_prepare_discard(mis);
2037 trace_loadvm_postcopy_handle_listen("after discard");
2040 * Sensitise RAM - can now generate requests for blocks that don't exist
2041 * However, at this point the CPU shouldn't be running, and the IO
2042 * shouldn't be doing anything yet so don't actually expect requests
2044 if (migrate_postcopy_ram()) {
2045 if (postcopy_ram_incoming_setup(mis)) {
2046 postcopy_ram_incoming_cleanup(mis);
2047 return -1;
2051 trace_loadvm_postcopy_handle_listen("after uffd");
2053 if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_LISTEN, &local_err)) {
2054 error_report_err(local_err);
2055 return -1;
2058 mis->have_listen_thread = true;
2059 postcopy_thread_create(mis, &mis->listen_thread, "postcopy/listen",
2060 postcopy_ram_listen_thread, QEMU_THREAD_DETACHED);
2061 trace_loadvm_postcopy_handle_listen("return");
2063 return 0;
2066 static void loadvm_postcopy_handle_run_bh(void *opaque)
2068 Error *local_err = NULL;
2069 MigrationIncomingState *mis = opaque;
2071 trace_loadvm_postcopy_handle_run_bh("enter");
2073 /* TODO we should move all of this lot into postcopy_ram.c or a shared code
2074 * in migration.c
2076 cpu_synchronize_all_post_init();
2078 trace_loadvm_postcopy_handle_run_bh("after cpu sync");
2080 qemu_announce_self(&mis->announce_timer, migrate_announce_params());
2082 trace_loadvm_postcopy_handle_run_bh("after announce");
2084 /* Make sure all file formats throw away their mutable metadata.
2085 * If we get an error here, just don't restart the VM yet. */
2086 bdrv_activate_all(&local_err);
2087 if (local_err) {
2088 error_report_err(local_err);
2089 local_err = NULL;
2090 autostart = false;
2091 }
2093 trace_loadvm_postcopy_handle_run_bh("after invalidate cache");
2095 dirty_bitmap_mig_before_vm_start();
2097 if (autostart) {
2098 /* Hold onto your hats, starting the CPU */
2099 vm_start();
2100 } else {
2101 /* leave it paused and let management decide when to start the CPU */
2102 runstate_set(RUN_STATE_PAUSED);
2103 }
2105 qemu_bh_delete(mis->bh);
2107 trace_loadvm_postcopy_handle_run_bh("return");
2108 }
2110 /* After all discards we can start running and asking for pages */
2111 static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
2112 {
2113 PostcopyState ps = postcopy_state_get();
2115 trace_loadvm_postcopy_handle_run();
2116 if (ps != POSTCOPY_INCOMING_LISTENING) {
2117 error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps);
2118 return -1;
2119 }
2121 postcopy_state_set(POSTCOPY_INCOMING_RUNNING);
2122 mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, mis);
2123 qemu_bh_schedule(mis->bh);
2125 /* We need to finish reading the stream from the package
2126 * and also stop reading anything more from the stream that loaded the
2127 * package (since it's now being read by the listener thread).
2128 * LOADVM_QUIT will quit all the layers of nested loadvm loops.
2129 */
2130 return LOADVM_QUIT;
2131 }
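/*
 * Illustrative sketch (not part of the original file): the one-shot
 * bottom-half pattern used by loadvm_postcopy_handle_run() above --
 * defer work to the main loop and delete the BH from inside its own
 * callback, as loadvm_postcopy_handle_run_bh() does. All names below
 * are hypothetical.
 */
typedef struct ExampleState {
    QEMUBH *bh;
} ExampleState;

static void example_bh(void *opaque)
{
    ExampleState *s = opaque;
    /* ... main-loop-context work goes here ... */
    qemu_bh_delete(s->bh); /* one-shot: drop the BH once it has run */
}

static void example_defer(ExampleState *s)
{
    s->bh = qemu_bh_new(example_bh, s);
    qemu_bh_schedule(s->bh); /* runs in the main loop, not in the caller */
}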
2133 /* Must be called with page_request_mutex held */
2134 static gboolean postcopy_sync_page_req(gpointer key, gpointer value,
2135 gpointer data)
2136 {
2137 MigrationIncomingState *mis = data;
2138 void *host_addr = (void *) key;
2139 ram_addr_t rb_offset;
2140 RAMBlock *rb;
2141 int ret;
2143 rb = qemu_ram_block_from_host(host_addr, true, &rb_offset);
2144 if (!rb) {
2145 /*
2146 * This should _never_ happen. However be nice for a migrating VM to
2147 * not crash/assert. Post an error (note: intended to not use *_once
2148 * because we do want to see all the illegal addresses; and this can
2149 * never be triggered by the guest so we're safe) and move on next.
2150 */
2151 error_report("%s: illegal host addr %p", __func__, host_addr);
2152 /* Try the next entry */
2153 return FALSE;
2154 }
2156 ret = migrate_send_rp_message_req_pages(mis, rb, rb_offset);
2157 if (ret) {
2158 /* Please refer to above comment. */
2159 error_report("%s: send rp message failed for addr %p",
2160 __func__, host_addr);
2161 return FALSE;
2162 }
2164 trace_postcopy_page_req_sync(host_addr);
2166 return FALSE;
2167 }
2169 static void migrate_send_rp_req_pages_pending(MigrationIncomingState *mis)
2170 {
2171 WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
2172 g_tree_foreach(mis->page_requested, postcopy_sync_page_req, mis);
2173 }
2174 }
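/*
 * Illustrative note (not part of the original file): g_tree_foreach()
 * stops traversal as soon as the callback returns TRUE, which is why
 * postcopy_sync_page_req() returns FALSE on every path above -- even on
 * error -- so that every pending page request is re-sent and every
 * illegal address gets reported, not just the first one.
 */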
2176 static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis)
2177 {
2178 if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
2179 error_report("%s: illegal resume received", __func__);
2180 /* Don't fail the load, only for this. */
2181 return 0;
2182 }
2184 /*
2185 * Reset the last_rb before we resend any page req to source again, since
2186 * the source should have it reset already.
2187 */
2188 mis->last_rb = NULL;
2190 /*
2191 * This means source VM is ready to resume the postcopy migration.
2192 */
2193 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
2194 MIGRATION_STATUS_POSTCOPY_ACTIVE);
2196 trace_loadvm_postcopy_handle_resume();
2198 /* Tell source that "we are ready" */
2199 migrate_send_rp_resume_ack(mis, MIGRATION_RESUME_ACK_VALUE);
2201 /*
2202 * After a postcopy recovery, the source should have lost the postcopy
2203 * queue, or potentially the requested pages could have been lost during
2204 * the network down phase. Let's re-sync with the source VM by re-sending
2205 * all the pending pages that we eagerly need, so these threads won't get
2206 * blocked too long due to the recovery.
2207 *
2208 * Without this procedure, the faulted destination VM threads (waiting for
2209 * page requests right before the postcopy is interrupted) can keep hanging
2210 * until the pages are sent by the source during the background copying of
2211 * pages, or another thread faulted on the same address accidentally.
2212 */
2213 migrate_send_rp_req_pages_pending(mis);
2215 /*
2216 * It's time to switch state and release the fault thread to continue
2217 * servicing page faults. Note that this should be explicitly after the
2218 * above call to migrate_send_rp_req_pages_pending(). In short:
2219 * migrate_send_rp_message_req_pages() is not thread safe, yet.
2220 */
2221 qemu_sem_post(&mis->postcopy_pause_sem_fault);
2223 if (migrate_postcopy_preempt()) {
2224 /*
2225 * The preempt channel will be created in an async manner; now let's
2226 * wait for it and make sure it's created.
2227 */
2228 qemu_sem_wait(&mis->postcopy_qemufile_dst_done);
2229 assert(mis->postcopy_qemufile_dst);
2230 /* Kick the fast ram load thread too */
2231 qemu_sem_post(&mis->postcopy_pause_sem_fast_load);
2232 }
2234 return 0;
2235 }
2237 /*
2238 * Immediately following this command is a blob of data containing an embedded
2239 * chunk of migration stream; read it and load it.
2241 * @mis: Incoming state
2242 * @length: Length of packaged data to read
2244 * Returns: Negative values on error
2245 */
2247 static int loadvm_handle_cmd_packaged(MigrationIncomingState *mis)
2248 {
2249 int ret;
2250 size_t length;
2251 QIOChannelBuffer *bioc;
2253 length = qemu_get_be32(mis->from_src_file);
2254 trace_loadvm_handle_cmd_packaged(length);
2256 if (length > MAX_VM_CMD_PACKAGED_SIZE) {
2257 error_report("Unreasonably large packaged state: %zu", length);
2258 return -1;
2259 }
2261 bioc = qio_channel_buffer_new(length);
2262 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-loadvm-buffer");
2263 ret = qemu_get_buffer(mis->from_src_file,
2264 bioc->data,
2265 length);
2266 if (ret != length) {
2267 object_unref(OBJECT(bioc));
2268 error_report("CMD_PACKAGED: Buffer receive fail ret=%d length=%zu",
2269 ret, length);
2270 return (ret < 0) ? ret : -EAGAIN;
2271 }
2272 bioc->usage += length;
2273 trace_loadvm_handle_cmd_packaged_received(ret);
2275 QEMUFile *packf = qemu_file_new_input(QIO_CHANNEL(bioc));
2277 ret = qemu_loadvm_state_main(packf, mis);
2278 trace_loadvm_handle_cmd_packaged_main(ret);
2279 qemu_fclose(packf);
2280 object_unref(OBJECT(bioc));
2282 return ret;
2283 }
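/*
 * Illustrative note (not part of the original file): MIG_CMD_PACKAGED
 * lets the source ship a chunk of migration stream as one opaque blob;
 * the destination replays it through a memory-backed channel here, so
 * the outer stream can then be handed over to the postcopy listen
 * thread without two readers racing on the same QEMUFile.
 */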
2285 /*
2286 * Handle a request from the source for the recved_bitmap of a RAMBlock
2287 * on the destination. Payload format:
2289 * len (1 byte) + ramblock_name (<255 bytes)
2290 */
2291 static int loadvm_handle_recv_bitmap(MigrationIncomingState *mis,
2292 uint16_t len)
2293 {
2294 QEMUFile *file = mis->from_src_file;
2295 RAMBlock *rb;
2296 char block_name[256];
2297 size_t cnt;
2299 cnt = qemu_get_counted_string(file, block_name);
2300 if (!cnt) {
2301 error_report("%s: failed to read block name", __func__);
2302 return -EINVAL;
2303 }
2305 /* Validate before using the data */
2306 if (qemu_file_get_error(file)) {
2307 return qemu_file_get_error(file);
2308 }
2310 if (len != cnt + 1) {
2311 error_report("%s: invalid payload length (%d)", __func__, len);
2312 return -EINVAL;
2313 }
2315 rb = qemu_ram_block_by_name(block_name);
2316 if (!rb) {
2317 error_report("%s: block '%s' not found", __func__, block_name);
2318 return -EINVAL;
2319 }
2321 migrate_send_rp_recv_bitmap(mis, block_name);
2323 trace_loadvm_handle_recv_bitmap(block_name);
2325 return 0;
2326 }
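/*
 * Illustrative sketch (not part of the original file): what a source
 * would have to put on the wire for loadvm_handle_recv_bitmap() to
 * accept it -- a one-byte length followed by the RAMBlock name, so the
 * command payload length must equal strlen(name) + 1. The helper below
 * is hypothetical and standalone.
 */
static size_t example_encode_recv_bitmap(uint8_t *buf, size_t buflen,
                                         const char *block_name)
{
    size_t cnt = strlen(block_name);

    if (cnt > 255 || buflen < cnt + 1) {
        return 0; /* name must fit the single length byte */
    }
    buf[0] = (uint8_t)cnt;
    memcpy(buf + 1, block_name, cnt); /* no trailing NUL on the wire */
    return cnt + 1; /* matches the "len != cnt + 1" check above */
}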
2328 static int loadvm_process_enable_colo(MigrationIncomingState *mis)
2329 {
2330 int ret = migration_incoming_enable_colo();
2332 if (!ret) {
2333 ret = colo_init_ram_cache();
2334 if (ret) {
2335 migration_incoming_disable_colo();
2336 }
2337 }
2338 return ret;
2339 }
2341 /*
2342 * Process an incoming 'QEMU_VM_COMMAND'
2343 * 0 just a normal return
2344 * LOADVM_QUIT All good, but exit the loop
2345 * <0 Error
2346 */
2347 static int loadvm_process_command(QEMUFile *f)
2348 {
2349 MigrationIncomingState *mis = migration_incoming_get_current();
2350 uint16_t cmd;
2351 uint16_t len;
2352 uint32_t tmp32;
2354 cmd = qemu_get_be16(f);
2355 len = qemu_get_be16(f);
2357 /* Check validity before continue processing of cmds */
2358 if (qemu_file_get_error(f)) {
2359 return qemu_file_get_error(f);
2360 }
2362 if (cmd >= MIG_CMD_MAX || cmd == MIG_CMD_INVALID) {
2363 error_report("MIG_CMD 0x%x unknown (len 0x%x)", cmd, len);
2364 return -EINVAL;
2365 }
2367 trace_loadvm_process_command(mig_cmd_args[cmd].name, len);
2369 if (mig_cmd_args[cmd].len != -1 && mig_cmd_args[cmd].len != len) {
2370 error_report("%s received with bad length - expecting %zu, got %d",
2371 mig_cmd_args[cmd].name,
2372 (size_t)mig_cmd_args[cmd].len, len);
2373 return -ERANGE;
2374 }
2376 switch (cmd) {
2377 case MIG_CMD_OPEN_RETURN_PATH:
2378 if (mis->to_src_file) {
2379 error_report("CMD_OPEN_RETURN_PATH called when RP already open");
2380 /* Not really a problem, so don't give up */
2381 return 0;
2382 }
2383 mis->to_src_file = qemu_file_get_return_path(f);
2384 if (!mis->to_src_file) {
2385 error_report("CMD_OPEN_RETURN_PATH failed");
2386 return -1;
2387 }
2389 /*
2390 * Switchover ack is enabled but no device uses it, so send an ACK to
2391 * source that it's OK to switchover. Do it here, after return path has
2392 * been created.
2393 */
2394 if (migrate_switchover_ack() && !mis->switchover_ack_pending_num) {
2395 int ret = migrate_send_rp_switchover_ack(mis);
2396 if (ret) {
2397 error_report(
2398 "Could not send switchover ack RP MSG, err %d (%s)", ret,
2399 strerror(-ret));
2400 return ret;
2401 }
2402 }
2403 break;
2405 case MIG_CMD_PING:
2406 tmp32 = qemu_get_be32(f);
2407 trace_loadvm_process_command_ping(tmp32);
2408 if (!mis->to_src_file) {
2409 error_report("CMD_PING (0x%x) received with no return path",
2410 tmp32);
2411 return -1;
2412 }
2413 migrate_send_rp_pong(mis, tmp32);
2414 break;
2416 case MIG_CMD_PACKAGED:
2417 return loadvm_handle_cmd_packaged(mis);
2419 case MIG_CMD_POSTCOPY_ADVISE:
2420 return loadvm_postcopy_handle_advise(mis, len);
2422 case MIG_CMD_POSTCOPY_LISTEN:
2423 return loadvm_postcopy_handle_listen(mis);
2425 case MIG_CMD_POSTCOPY_RUN:
2426 return loadvm_postcopy_handle_run(mis);
2428 case MIG_CMD_POSTCOPY_RAM_DISCARD:
2429 return loadvm_postcopy_ram_handle_discard(mis, len);
2431 case MIG_CMD_POSTCOPY_RESUME:
2432 return loadvm_postcopy_handle_resume(mis);
2434 case MIG_CMD_RECV_BITMAP:
2435 return loadvm_handle_recv_bitmap(mis, len);
2437 case MIG_CMD_ENABLE_COLO:
2438 return loadvm_process_enable_colo(mis);
2439 }
2441 return 0;
2442 }
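/*
 * Illustrative sketch (not part of the original file): the framing that
 * loadvm_process_command() reads back -- a QEMU_VM_COMMAND section byte,
 * then a big-endian u16 command id and u16 payload length. The section
 * byte value and the helper below are assumptions for illustration only.
 */
static size_t example_encode_vm_command(uint8_t *buf, uint16_t cmd,
                                        uint16_t len)
{
    size_t off = 0;

    buf[off++] = 0x08;          /* QEMU_VM_COMMAND (assumed value) */
    buf[off++] = cmd >> 8;      /* be16 command, e.g. MIG_CMD_PING */
    buf[off++] = cmd & 0xff;
    buf[off++] = len >> 8;      /* be16 payload length */
    buf[off++] = len & 0xff;
    return off;                 /* 'len' bytes of payload follow */
}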
2444 /*
2445 * Read a footer off the wire and check that it matches the expected section
2447 * Returns: true if the footer was good
2448 * false if there is a problem (and calls error_report to say why)
2449 */
2450 static bool check_section_footer(QEMUFile *f, SaveStateEntry *se)
2451 {
2452 int ret;
2453 uint8_t read_mark;
2454 uint32_t read_section_id;
2456 if (!migrate_get_current()->send_section_footer) {
2457 /* No footer to check */
2458 return true;
2459 }
2461 read_mark = qemu_get_byte(f);
2463 ret = qemu_file_get_error(f);
2464 if (ret) {
2465 error_report("%s: Read section footer failed: %d",
2466 __func__, ret);
2467 return false;
2468 }
2470 if (read_mark != QEMU_VM_SECTION_FOOTER) {
2471 error_report("Missing section footer for %s", se->idstr);
2472 return false;
2473 }
2475 read_section_id = qemu_get_be32(f);
2476 if (read_section_id != se->load_section_id) {
2477 error_report("Mismatched section id in footer for %s -"
2478 " read 0x%x expected 0x%x",
2479 se->idstr, read_section_id, se->load_section_id);
2480 return false;
2481 }
2483 /* All good */
2484 return true;
2485 }
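/*
 * Illustrative sketch (not part of the original file): the footer that
 * check_section_footer() expects -- a QEMU_VM_SECTION_FOOTER marker byte
 * followed by the section id as a big-endian u32. Hypothetical helper;
 * the marker value is taken as a parameter rather than assumed here.
 */
static void example_encode_footer(uint8_t buf[5], uint8_t footer_mark,
                                  uint32_t section_id)
{
    buf[0] = footer_mark;                 /* QEMU_VM_SECTION_FOOTER */
    buf[1] = section_id >> 24;            /* be32 section id, as read back */
    buf[2] = (section_id >> 16) & 0xff;
    buf[3] = (section_id >> 8) & 0xff;
    buf[4] = section_id & 0xff;
}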
2487 static int
2488 qemu_loadvm_section_start_full(QEMUFile *f, MigrationIncomingState *mis)
2489 {
2490 uint32_t instance_id, version_id, section_id;
2491 SaveStateEntry *se;
2492 char idstr[256];
2493 int ret;
2495 /* Read section start */
2496 section_id = qemu_get_be32(f);
2497 if (!qemu_get_counted_string(f, idstr)) {
2498 error_report("Unable to read ID string for section %u",
2499 section_id);
2500 return -EINVAL;
2501 }
2502 instance_id = qemu_get_be32(f);
2503 version_id = qemu_get_be32(f);
2505 ret = qemu_file_get_error(f);
2506 if (ret) {
2507 error_report("%s: Failed to read instance/version ID: %d",
2508 __func__, ret);
2509 return ret;
2510 }
2512 trace_qemu_loadvm_state_section_startfull(section_id, idstr,
2513 instance_id, version_id);
2514 /* Find savevm section */
2515 se = find_se(idstr, instance_id);
2516 if (se == NULL) {
2517 error_report("Unknown savevm section or instance '%s' %"PRIu32". "
2518 "Make sure that your current VM setup matches your "
2519 "saved VM setup, including any hotplugged devices",
2520 idstr, instance_id);
2521 return -EINVAL;
2522 }
2524 /* Validate version */
2525 if (version_id > se->version_id) {
2526 error_report("savevm: unsupported version %d for '%s' v%d",
2527 version_id, idstr, se->version_id);
2528 return -EINVAL;
2529 }
2530 se->load_version_id = version_id;
2531 se->load_section_id = section_id;
2533 /* Validate if it is a device's state */
2534 if (xen_enabled() && se->is_ram) {
2535 error_report("loadvm: %s RAM loading not allowed on Xen", idstr);
2536 return -EINVAL;
2537 }
2539 ret = vmstate_load(f, se);
2540 if (ret < 0) {
2541 error_report("error while loading state for instance 0x%"PRIx32" of"
2542 " device '%s'", instance_id, idstr);
2543 return ret;
2544 }
2545 if (!check_section_footer(f, se)) {
2546 return -EINVAL;
2547 }
2549 return 0;
2550 }
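/*
 * Illustrative note (not part of the original file): the layout parsed
 * above for a SECTION_START/SECTION_FULL record, in stream order:
 *
 *   be32  section_id
 *   u8    idstr length, then that many bytes of idstr (counted string)
 *   be32  instance_id
 *   be32  version_id
 *   ...   device payload (vmstate_load), then the optional footer
 */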
2552 static int
2553 qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
2554 {
2555 uint32_t section_id;
2556 SaveStateEntry *se;
2557 int ret;
2559 section_id = qemu_get_be32(f);
2561 ret = qemu_file_get_error(f);
2562 if (ret) {
2563 error_report("%s: Failed to read section ID: %d",
2564 __func__, ret);
2565 return ret;
2566 }
2568 trace_qemu_loadvm_state_section_partend(section_id);
2569 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
2570 if (se->load_section_id == section_id) {
2571 break;
2572 }
2573 }
2574 if (se == NULL) {
2575 error_report("Unknown savevm section %d", section_id);
2576 return -EINVAL;
2577 }
2579 ret = vmstate_load(f, se);
2580 if (ret < 0) {
2581 error_report("error while loading state section id %d(%s)",
2582 section_id, se->idstr);
2583 return ret;
2584 }
2585 if (!check_section_footer(f, se)) {
2586 return -EINVAL;
2587 }
2589 return 0;
2590 }
2592 static int qemu_loadvm_state_header(QEMUFile *f)
2593 {
2594 unsigned int v;
2595 int ret;
2597 v = qemu_get_be32(f);
2598 if (v != QEMU_VM_FILE_MAGIC) {
2599 error_report("Not a migration stream");
2600 return -EINVAL;
2601 }
2603 v = qemu_get_be32(f);
2604 if (v == QEMU_VM_FILE_VERSION_COMPAT) {
2605 error_report("SaveVM v2 format is obsolete and doesn't work anymore");
2606 return -ENOTSUP;
2607 }
2608 if (v != QEMU_VM_FILE_VERSION) {
2609 error_report("Unsupported migration stream version");
2610 return -ENOTSUP;
2611 }
2613 if (migrate_get_current()->send_configuration) {
2614 if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
2615 error_report("Configuration section missing");
2616 qemu_loadvm_state_cleanup();
2617 return -EINVAL;
2618 }
2619 ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0);
2621 if (ret) {
2622 qemu_loadvm_state_cleanup();
2623 return ret;
2624 }
2625 }
2626 return 0;
2627 }
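/*
 * Illustrative sketch (not part of the original file): a minimal,
 * standalone check of the stream header parsed above. The magic value
 * here is an assumption (QEMU_VM_FILE_MAGIC is believed to be the
 * big-endian ASCII string "QEVM").
 */
static bool example_header_looks_valid(const uint8_t *buf)
{
    uint32_t magic = ((uint32_t)buf[0] << 24) | ((uint32_t)buf[1] << 16) |
                     ((uint32_t)buf[2] << 8) | buf[3];

    return magic == 0x5145564d; /* 'Q''E''V''M' (assumed value) */
}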
2629 static void qemu_loadvm_state_switchover_ack_needed(MigrationIncomingState *mis)
2630 {
2631 SaveStateEntry *se;
2633 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
2634 if (!se->ops || !se->ops->switchover_ack_needed) {
2635 continue;
2636 }
2638 if (se->ops->switchover_ack_needed(se->opaque)) {
2639 mis->switchover_ack_pending_num++;
2640 }
2641 }
2643 trace_loadvm_state_switchover_ack_needed(mis->switchover_ack_pending_num);
2644 }
2646 static int qemu_loadvm_state_setup(QEMUFile *f)
2647 {
2648 SaveStateEntry *se;
2649 int ret;
2651 trace_loadvm_state_setup();
2652 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
2653 if (!se->ops || !se->ops->load_setup) {
2654 continue;
2655 }
2656 if (se->ops->is_active) {
2657 if (!se->ops->is_active(se->opaque)) {
2658 continue;
2659 }
2660 }
2662 ret = se->ops->load_setup(f, se->opaque);
2663 if (ret < 0) {
2664 qemu_file_set_error(f, ret);
2665 error_report("Load state of device %s failed", se->idstr);
2666 return ret;
2667 }
2668 }
2669 return 0;
2670 }
2672 void qemu_loadvm_state_cleanup(void)
2673 {
2674 SaveStateEntry *se;
2676 trace_loadvm_state_cleanup();
2677 QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
2678 if (se->ops && se->ops->load_cleanup) {
2679 se->ops->load_cleanup(se->opaque);
2680 }
2681 }
2682 }
2684 /* Return true if we should continue the migration, or false. */
2685 static bool postcopy_pause_incoming(MigrationIncomingState *mis)
2686 {
2687 int i;
2689 trace_postcopy_pause_incoming();
2691 assert(migrate_postcopy_ram());
2693 /*
2694 * Unregister yank with either from/to src would work, since ioc behind it
2695 * is the same
2696 */
2697 migration_ioc_unregister_yank_from_file(mis->from_src_file);
2699 assert(mis->from_src_file);
2700 qemu_file_shutdown(mis->from_src_file);
2701 qemu_fclose(mis->from_src_file);
2702 mis->from_src_file = NULL;
2704 assert(mis->to_src_file);
2705 qemu_file_shutdown(mis->to_src_file);
2706 qemu_mutex_lock(&mis->rp_mutex);
2707 qemu_fclose(mis->to_src_file);
2708 mis->to_src_file = NULL;
2709 qemu_mutex_unlock(&mis->rp_mutex);
2711 /*
2712 * NOTE: this must happen before resetting the PostcopyTmpPages below,
2713 * otherwise it's racy to reset those fields when the fast load thread
2714 * can be accessing it in parallel.
2715 */
2716 if (mis->postcopy_qemufile_dst) {
2717 qemu_file_shutdown(mis->postcopy_qemufile_dst);
2718 /* Take the mutex to make sure the fast ram load thread halted */
2719 qemu_mutex_lock(&mis->postcopy_prio_thread_mutex);
2720 migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst);
2721 qemu_fclose(mis->postcopy_qemufile_dst);
2722 mis->postcopy_qemufile_dst = NULL;
2723 qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex);
2724 }
2726 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
2727 MIGRATION_STATUS_POSTCOPY_PAUSED);
2729 /* Notify the fault thread for the invalidated file handle */
2730 postcopy_fault_thread_notify(mis);
2732 /*
2733 * If network is interrupted, any temp page we received will be useless
2734 * because we didn't mark them as "received" in receivedmap. After a
2735 * proper recovery later (which will sync src dirty bitmap with receivedmap
2736 * on dest) these cached small pages will be resent again.
2737 */
2738 for (i = 0; i < mis->postcopy_channels; i++) {
2739 postcopy_temp_page_reset(&mis->postcopy_tmp_pages[i]);
2740 }
2742 error_report("Detected IO failure for postcopy. "
2743 "Migration paused.");
2745 while (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
2746 qemu_sem_wait(&mis->postcopy_pause_sem_dst);
2747 }
2749 trace_postcopy_pause_incoming_continued();
2751 return true;
2752 }
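/*
 * Illustrative note (not part of the original file): once paused here, a
 * management application re-establishes the channel and resumes; with
 * current QMP that is roughly (the URI is only an example):
 *
 *   destination: { "execute": "migrate-recover",
 *                  "arguments": { "uri": "tcp:0:4444" } }
 *   source:      { "execute": "migrate",
 *                  "arguments": { "uri": "tcp:dst:4444", "resume": true } }
 *
 * which eventually leads to MIG_CMD_POSTCOPY_RESUME being handled above.
 */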
2754 int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis)
2755 {
2756 uint8_t section_type;
2757 int ret = 0;
2759 retry:
2760 while (true) {
2761 section_type = qemu_get_byte(f);
2763 ret = qemu_file_get_error_obj_any(f, mis->postcopy_qemufile_dst, NULL);
2764 if (ret) {
2765 break;
2766 }
2768 trace_qemu_loadvm_state_section(section_type);
2769 switch (section_type) {
2770 case QEMU_VM_SECTION_START:
2771 case QEMU_VM_SECTION_FULL:
2772 ret = qemu_loadvm_section_start_full(f, mis);
2773 if (ret < 0) {
2774 goto out;
2775 }
2776 break;
2777 case QEMU_VM_SECTION_PART:
2778 case QEMU_VM_SECTION_END:
2779 ret = qemu_loadvm_section_part_end(f, mis);
2780 if (ret < 0) {
2781 goto out;
2782 }
2783 break;
2784 case QEMU_VM_COMMAND:
2785 ret = loadvm_process_command(f);
2786 trace_qemu_loadvm_state_section_command(ret);
2787 if ((ret < 0) || (ret == LOADVM_QUIT)) {
2788 goto out;
2789 }
2790 break;
2791 case QEMU_VM_EOF:
2792 /* This is the end of migration */
2793 goto out;
2794 default:
2795 error_report("Unknown savevm section type %d", section_type);
2796 ret = -EINVAL;
2797 goto out;
2798 }
2799 }
2801 out:
2802 if (ret < 0) {
2803 qemu_file_set_error(f, ret);
2805 /* Cancel bitmaps incoming regardless of recovery */
2806 dirty_bitmap_mig_cancel_incoming();
2808 /*
2809 * If we are in an active postcopy, then we pause instead of
2810 * bailing out, to at least keep the VM's dirty data. Note
2811 * that the POSTCOPY_INCOMING_LISTENING stage is still not enough,
2812 * during which we're still receiving device states and we
2813 * still haven't yet started the VM on the destination.
2814 *
2815 * Only RAM postcopy supports recovery. Still, if RAM postcopy is
2816 * enabled, canceled bitmaps postcopy will not affect RAM postcopy
2817 * recovering.
2818 */
2819 if (postcopy_state_get() == POSTCOPY_INCOMING_RUNNING &&
2820 migrate_postcopy_ram() && postcopy_pause_incoming(mis)) {
2821 /* Reset f to point to the newly created channel */
2822 f = mis->from_src_file;
2823 goto retry;
2824 }
2825 }
2826 return ret;
2827 }
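/*
 * Illustrative note (not part of the original file): the section-type
 * bytes dispatched on above, with values as assumed from other QEMU
 * sources (not confirmed by this file): QEMU_VM_EOF 0x00,
 * SECTION_START 0x01, SECTION_PART 0x02, SECTION_END 0x03,
 * SECTION_FULL 0x04, QEMU_VM_COMMAND 0x08.
 */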
2829 int qemu_loadvm_state(QEMUFile *f)
2830 {
2831 MigrationIncomingState *mis = migration_incoming_get_current();
2832 Error *local_err = NULL;
2833 int ret;
2835 if (qemu_savevm_state_blocked(&local_err)) {
2836 error_report_err(local_err);
2837 return -EINVAL;
2838 }
2840 ret = qemu_loadvm_state_header(f);
2841 if (ret) {
2842 return ret;
2843 }
2845 if (qemu_loadvm_state_setup(f) != 0) {
2846 return -EINVAL;
2847 }
2849 if (migrate_switchover_ack()) {
2850 qemu_loadvm_state_switchover_ack_needed(mis);
2851 }
2853 cpu_synchronize_all_pre_loadvm();
2855 ret = qemu_loadvm_state_main(f, mis);
2856 qemu_event_set(&mis->main_thread_load_event);
2858 trace_qemu_loadvm_state_post_main(ret);
2860 if (mis->have_listen_thread) {
2861 /* Listen thread still going, can't clean up yet */
2862 return ret;
2863 }
2865 if (ret == 0) {
2866 ret = qemu_file_get_error(f);
2867 }
2869 /*
2870 * Try to read in the VMDESC section as well, so that dumping tools that
2871 * intercept our migration stream have the chance to see it.
2872 */
2874 /* We've got to be careful; if we don't read the data and just shut the fd
2875 * then the sender can error if we close while it's still sending.
2876 * We also mustn't read data that isn't there; some transports (RDMA)
2877 * will stall waiting for that data when the source has already closed.
2878 */
2879 if (ret == 0 && should_send_vmdesc()) {
2880 uint8_t *buf;
2881 uint32_t size;
2882 uint8_t section_type = qemu_get_byte(f);
2884 if (section_type != QEMU_VM_VMDESCRIPTION) {
2885 error_report("Expected vmdescription section, but got %d",
2886 section_type);
2887 /*
2888 * It doesn't seem worth failing at this point since
2889 * we apparently have an otherwise valid VM state
2890 */
2891 } else {
2892 buf = g_malloc(0x1000);
2893 size = qemu_get_be32(f);
2895 while (size > 0) {
2896 uint32_t read_chunk = MIN(size, 0x1000);
2897 qemu_get_buffer(f, buf, read_chunk);
2898 size -= read_chunk;
2899 }
2900 g_free(buf);
2901 }
2902 }
2904 qemu_loadvm_state_cleanup();
2905 cpu_synchronize_all_post_init();
2907 return ret;
2908 }
2910 int qemu_load_device_state(QEMUFile *f)
2911 {
2912 MigrationIncomingState *mis = migration_incoming_get_current();
2913 int ret;
2915 /* Load QEMU_VM_SECTION_FULL section */
2916 ret = qemu_loadvm_state_main(f, mis);
2917 if (ret < 0) {
2918 error_report("Failed to load device state: %d", ret);
2919 return ret;
2920 }
2922 cpu_synchronize_all_post_init();
2923 return 0;
2924 }
2926 int qemu_loadvm_approve_switchover(void)
2927 {
2928 MigrationIncomingState *mis = migration_incoming_get_current();
2930 if (!mis->switchover_ack_pending_num) {
2931 return -EINVAL;
2932 }
2934 mis->switchover_ack_pending_num--;
2935 trace_loadvm_approve_switchover(mis->switchover_ack_pending_num);
2937 if (mis->switchover_ack_pending_num) {
2938 return 0;
2939 }
2941 return migrate_send_rp_switchover_ack(mis);
2942 }
2944 bool save_snapshot(const char *name, bool overwrite, const char *vmstate,
2945 bool has_devices, strList *devices, Error **errp)
2946 {
2947 BlockDriverState *bs;
2948 QEMUSnapshotInfo sn1, *sn = &sn1;
2949 int ret = -1, ret2;
2950 QEMUFile *f;
2951 int saved_vm_running;
2952 uint64_t vm_state_size;
2953 g_autoptr(GDateTime) now = g_date_time_new_now_local();
2954 AioContext *aio_context;
2956 GLOBAL_STATE_CODE();
2958 if (migration_is_blocked(errp)) {
2959 return false;
2960 }
2962 if (!replay_can_snapshot()) {
2963 error_setg(errp, "Record/replay does not allow making snapshot "
2964 "right now. Try once more later.");
2965 return false;
2966 }
2968 if (!bdrv_all_can_snapshot(has_devices, devices, errp)) {
2969 return false;
2970 }
2972 /* Delete old snapshots of the same name */
2973 if (name) {
2974 if (overwrite) {
2975 if (bdrv_all_delete_snapshot(name, has_devices,
2976 devices, errp) < 0) {
2977 return false;
2978 }
2979 } else {
2980 ret2 = bdrv_all_has_snapshot(name, has_devices, devices, errp);
2981 if (ret2 < 0) {
2982 return false;
2983 }
2984 if (ret2 == 1) {
2985 error_setg(errp,
2986 "Snapshot '%s' already exists in one or more devices",
2987 name);
2988 return false;
2989 }
2990 }
2991 }
2993 bs = bdrv_all_find_vmstate_bs(vmstate, has_devices, devices, errp);
2994 if (bs == NULL) {
2995 return false;
2996 }
2997 aio_context = bdrv_get_aio_context(bs);
2999 saved_vm_running = runstate_is_running();
3001 global_state_store();
3002 vm_stop(RUN_STATE_SAVE_VM);
3004 bdrv_drain_all_begin();
3006 aio_context_acquire(aio_context);
3008 memset(sn, 0, sizeof(*sn));
3010 /* fill auxiliary fields */
3011 sn->date_sec = g_date_time_to_unix(now);
3012 sn->date_nsec = g_date_time_get_microsecond(now) * 1000;
3013 sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
3014 if (replay_mode != REPLAY_MODE_NONE) {
3015 sn->icount = replay_get_current_icount();
3016 } else {
3017 sn->icount = -1ULL;
3018 }
3020 if (name) {
3021 pstrcpy(sn->name, sizeof(sn->name), name);
3022 } else {
3023 g_autofree char *autoname = g_date_time_format(now, "vm-%Y%m%d%H%M%S");
3024 pstrcpy(sn->name, sizeof(sn->name), autoname);
3025 }
3027 /* save the VM state */
3028 f = qemu_fopen_bdrv(bs, 1);
3029 if (!f) {
3030 error_setg(errp, "Could not open VM state file");
3031 goto the_end;
3032 }
3033 ret = qemu_savevm_state(f, errp);
3034 vm_state_size = qemu_file_transferred_noflush(f);
3035 ret2 = qemu_fclose(f);
3036 if (ret < 0) {
3037 goto the_end;
3038 }
3039 if (ret2 < 0) {
3040 ret = ret2;
3041 goto the_end;
3042 }
3044 /* The bdrv_all_create_snapshot() call that follows acquires the AioContext
3045 * for itself. BDRV_POLL_WHILE() does not support nested locking because
3046 * it only releases the lock once. Therefore synchronous I/O will deadlock
3047 * unless we release the AioContext before bdrv_all_create_snapshot().
3048 */
3049 aio_context_release(aio_context);
3050 aio_context = NULL;
3052 ret = bdrv_all_create_snapshot(sn, bs, vm_state_size,
3053 has_devices, devices, errp);
3054 if (ret < 0) {
3055 bdrv_all_delete_snapshot(sn->name, has_devices, devices, NULL);
3056 goto the_end;
3057 }
3059 ret = 0;
3061 the_end:
3062 if (aio_context) {
3063 aio_context_release(aio_context);
3064 }
3066 bdrv_drain_all_end();
3068 if (saved_vm_running) {
3069 vm_start();
3070 }
3071 return ret == 0;
3072 }
3074 void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live,
3075 Error **errp)
3076 {
3077 QEMUFile *f;
3078 QIOChannelFile *ioc;
3079 int saved_vm_running;
3080 int ret;
3082 if (!has_live) {
3083 /* live defaults to true so old versions of the Xen tool stack can
3084 * have a successful live migration */
3085 live = true;
3086 }
3088 saved_vm_running = runstate_is_running();
3089 vm_stop(RUN_STATE_SAVE_VM);
3090 global_state_store_running();
3092 ioc = qio_channel_file_new_path(filename, O_WRONLY | O_CREAT | O_TRUNC,
3093 0660, errp);
3094 if (!ioc) {
3095 goto the_end;
3096 }
3097 qio_channel_set_name(QIO_CHANNEL(ioc), "migration-xen-save-state");
3098 f = qemu_file_new_output(QIO_CHANNEL(ioc));
3099 object_unref(OBJECT(ioc));
3100 ret = qemu_save_device_state(f);
3101 if (ret < 0 || qemu_fclose(f) < 0) {
3102 error_setg(errp, QERR_IO_ERROR);
3103 } else {
3104 /* libxl calls the QMP command "stop" before calling
3105 * "xen-save-devices-state" and in case of migration failure, libxl
3106 * would call "cont".
3107 * So call bdrv_inactivate_all (release locks) here to let the other
3108 * side of the migration take control of the images.
3109 */
3110 if (live && !saved_vm_running) {
3111 ret = bdrv_inactivate_all();
3112 if (ret) {
3113 error_setg(errp, "%s: bdrv_inactivate_all() failed (%d)",
3114 __func__, ret);
3115 }
3116 }
3117 }
3119 the_end:
3120 if (saved_vm_running) {
3121 vm_start();
3122 }
3123 }
3125 void qmp_xen_load_devices_state(const char *filename, Error **errp)
3126 {
3127 QEMUFile *f;
3128 QIOChannelFile *ioc;
3129 int ret;
3131 /* Guest must be paused before loading the device state; the RAM state
3132 * will already have been loaded by xc
3133 */
3134 if (runstate_is_running()) {
3135 error_setg(errp, "Cannot update device state while vm is running");
3136 return;
3137 }
3138 vm_stop(RUN_STATE_RESTORE_VM);
3140 ioc = qio_channel_file_new_path(filename, O_RDONLY | O_BINARY, 0, errp);
3141 if (!ioc) {
3142 return;
3143 }
3144 qio_channel_set_name(QIO_CHANNEL(ioc), "migration-xen-load-state");
3145 f = qemu_file_new_input(QIO_CHANNEL(ioc));
3146 object_unref(OBJECT(ioc));
3148 ret = qemu_loadvm_state(f);
3149 qemu_fclose(f);
3150 if (ret < 0) {
3151 error_setg(errp, QERR_IO_ERROR);
3152 }
3153 migration_incoming_state_destroy();
3154 }
3156 bool load_snapshot(const char *name, const char *vmstate,
3157 bool has_devices, strList *devices, Error **errp)
3158 {
3159 BlockDriverState *bs_vm_state;
3160 QEMUSnapshotInfo sn;
3161 QEMUFile *f;
3162 int ret;
3163 AioContext *aio_context;
3164 MigrationIncomingState *mis = migration_incoming_get_current();
3166 if (!bdrv_all_can_snapshot(has_devices, devices, errp)) {
3167 return false;
3168 }
3169 ret = bdrv_all_has_snapshot(name, has_devices, devices, errp);
3170 if (ret < 0) {
3171 return false;
3172 }
3173 if (ret == 0) {
3174 error_setg(errp, "Snapshot '%s' does not exist in one or more devices",
3175 name);
3176 return false;
3177 }
3179 bs_vm_state = bdrv_all_find_vmstate_bs(vmstate, has_devices, devices, errp);
3180 if (!bs_vm_state) {
3181 return false;
3182 }
3183 aio_context = bdrv_get_aio_context(bs_vm_state);
3185 /* Don't even try to load empty VM states */
3186 aio_context_acquire(aio_context);
3187 ret = bdrv_snapshot_find(bs_vm_state, &sn, name);
3188 aio_context_release(aio_context);
3189 if (ret < 0) {
3190 return false;
3191 } else if (sn.vm_state_size == 0) {
3192 error_setg(errp, "This is a disk-only snapshot. Revert to it "
3193 " offline using qemu-img");
3194 return false;
3195 }
3197 /*
3198 * Flush the record/replay queue. Now the VM state is going
3199 * to change. Therefore we don't need to preserve its consistency
3200 */
3201 replay_flush_events();
3203 /* Flush all IO requests so they don't interfere with the new state. */
3204 bdrv_drain_all_begin();
3206 ret = bdrv_all_goto_snapshot(name, has_devices, devices, errp);
3207 if (ret < 0) {
3208 goto err_drain;
3209 }
3211 /* restore the VM state */
3212 f = qemu_fopen_bdrv(bs_vm_state, 0);
3213 if (!f) {
3214 error_setg(errp, "Could not open VM state file");
3215 goto err_drain;
3216 }
3218 qemu_system_reset(SHUTDOWN_CAUSE_SNAPSHOT_LOAD);
3219 mis->from_src_file = f;
3221 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
3222 ret = -EINVAL;
3223 goto err_drain;
3224 }
3225 aio_context_acquire(aio_context);
3226 ret = qemu_loadvm_state(f);
3227 migration_incoming_state_destroy();
3228 aio_context_release(aio_context);
3230 bdrv_drain_all_end();
3232 if (ret < 0) {
3233 error_setg(errp, "Error %d while loading VM state", ret);
3234 return false;
3235 }
3237 return true;
3239 err_drain:
3240 bdrv_drain_all_end();
3241 return false;
3242 }
3244 bool delete_snapshot(const char *name, bool has_devices,
3245 strList *devices, Error **errp)
3246 {
3247 if (!bdrv_all_can_snapshot(has_devices, devices, errp)) {
3248 return false;
3249 }
3251 if (bdrv_all_delete_snapshot(name, has_devices, devices, errp) < 0) {
3252 return false;
3253 }
3255 return true;
3256 }
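/*
 * Illustrative note (not part of the original file): save_snapshot(),
 * load_snapshot() and delete_snapshot() are the synchronous helpers
 * behind the HMP "savevm", "loadvm" and "delvm" monitor commands; the
 * job-based QMP variants follow below.
 */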
3258 void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev)
3259 {
3260 qemu_ram_set_idstr(mr->ram_block,
3261 memory_region_name(mr), dev);
3262 qemu_ram_set_migratable(mr->ram_block);
3263 }
3265 void vmstate_unregister_ram(MemoryRegion *mr, DeviceState *dev)
3266 {
3267 qemu_ram_unset_idstr(mr->ram_block);
3268 qemu_ram_unset_migratable(mr->ram_block);
3269 }
3271 void vmstate_register_ram_global(MemoryRegion *mr)
3272 {
3273 vmstate_register_ram(mr, NULL);
3274 }
3276 bool vmstate_check_only_migratable(const VMStateDescription *vmsd)
3277 {
3278 /* check needed if --only-migratable is specified */
3279 if (!only_migratable) {
3280 return true;
3281 }
3283 return !(vmsd && vmsd->unmigratable);
3284 }
3286 typedef struct SnapshotJob {
3287 Job common;
3288 char *tag;
3289 char *vmstate;
3290 strList *devices;
3291 Coroutine *co;
3292 Error **errp;
3293 bool ret;
3294 } SnapshotJob;
3296 static void qmp_snapshot_job_free(SnapshotJob *s)
3297 {
3298 g_free(s->tag);
3299 g_free(s->vmstate);
3300 qapi_free_strList(s->devices);
3301 }
3304 static void snapshot_load_job_bh(void *opaque)
3305 {
3306 Job *job = opaque;
3307 SnapshotJob *s = container_of(job, SnapshotJob, common);
3308 int orig_vm_running;
3310 job_progress_set_remaining(&s->common, 1);
3312 orig_vm_running = runstate_is_running();
3313 vm_stop(RUN_STATE_RESTORE_VM);
3315 s->ret = load_snapshot(s->tag, s->vmstate, true, s->devices, s->errp);
3316 if (s->ret && orig_vm_running) {
3317 vm_start();
3318 }
3320 job_progress_update(&s->common, 1);
3322 qmp_snapshot_job_free(s);
3323 aio_co_wake(s->co);
3324 }
3326 static void snapshot_save_job_bh(void *opaque)
3327 {
3328 Job *job = opaque;
3329 SnapshotJob *s = container_of(job, SnapshotJob, common);
3331 job_progress_set_remaining(&s->common, 1);
3332 s->ret = save_snapshot(s->tag, false, s->vmstate,
3333 true, s->devices, s->errp);
3334 job_progress_update(&s->common, 1);
3336 qmp_snapshot_job_free(s);
3337 aio_co_wake(s->co);
3338 }
3340 static void snapshot_delete_job_bh(void *opaque)
3341 {
3342 Job *job = opaque;
3343 SnapshotJob *s = container_of(job, SnapshotJob, common);
3345 job_progress_set_remaining(&s->common, 1);
3346 s->ret = delete_snapshot(s->tag, true, s->devices, s->errp);
3347 job_progress_update(&s->common, 1);
3349 qmp_snapshot_job_free(s);
3350 aio_co_wake(s->co);
3351 }
3353 static int coroutine_fn snapshot_save_job_run(Job *job, Error **errp)
3354 {
3355 SnapshotJob *s = container_of(job, SnapshotJob, common);
3356 s->errp = errp;
3357 s->co = qemu_coroutine_self();
3358 aio_bh_schedule_oneshot(qemu_get_aio_context(),
3359 snapshot_save_job_bh, job);
3360 qemu_coroutine_yield();
3361 return s->ret ? 0 : -1;
3362 }
3364 static int coroutine_fn snapshot_load_job_run(Job *job, Error **errp)
3365 {
3366 SnapshotJob *s = container_of(job, SnapshotJob, common);
3367 s->errp = errp;
3368 s->co = qemu_coroutine_self();
3369 aio_bh_schedule_oneshot(qemu_get_aio_context(),
3370 snapshot_load_job_bh, job);
3371 qemu_coroutine_yield();
3372 return s->ret ? 0 : -1;
3373 }
3375 static int coroutine_fn snapshot_delete_job_run(Job *job, Error **errp)
3376 {
3377 SnapshotJob *s = container_of(job, SnapshotJob, common);
3378 s->errp = errp;
3379 s->co = qemu_coroutine_self();
3380 aio_bh_schedule_oneshot(qemu_get_aio_context(),
3381 snapshot_delete_job_bh, job);
3382 qemu_coroutine_yield();
3383 return s->ret ? 0 : -1;
3384 }
3387 static const JobDriver snapshot_load_job_driver = {
3388 .instance_size = sizeof(SnapshotJob),
3389 .job_type = JOB_TYPE_SNAPSHOT_LOAD,
3390 .run = snapshot_load_job_run,
3391 };
3393 static const JobDriver snapshot_save_job_driver = {
3394 .instance_size = sizeof(SnapshotJob),
3395 .job_type = JOB_TYPE_SNAPSHOT_SAVE,
3396 .run = snapshot_save_job_run,
3397 };
3399 static const JobDriver snapshot_delete_job_driver = {
3400 .instance_size = sizeof(SnapshotJob),
3401 .job_type = JOB_TYPE_SNAPSHOT_DELETE,
3402 .run = snapshot_delete_job_run,
3403 };
3406 void qmp_snapshot_save(const char *job_id,
3407 const char *tag,
3408 const char *vmstate,
3409 strList *devices,
3410 Error **errp)
3411 {
3412 SnapshotJob *s;
3414 s = job_create(job_id, &snapshot_save_job_driver, NULL,
3415 qemu_get_aio_context(), JOB_MANUAL_DISMISS,
3416 NULL, NULL, errp);
3417 if (!s) {
3418 return;
3419 }
3421 s->tag = g_strdup(tag);
3422 s->vmstate = g_strdup(vmstate);
3423 s->devices = QAPI_CLONE(strList, devices);
3425 job_start(&s->common);
3426 }
3428 void qmp_snapshot_load(const char *job_id,
3429 const char *tag,
3430 const char *vmstate,
3431 strList *devices,
3432 Error **errp)
3433 {
3434 SnapshotJob *s;
3436 s = job_create(job_id, &snapshot_load_job_driver, NULL,
3437 qemu_get_aio_context(), JOB_MANUAL_DISMISS,
3438 NULL, NULL, errp);
3439 if (!s) {
3440 return;
3441 }
3443 s->tag = g_strdup(tag);
3444 s->vmstate = g_strdup(vmstate);
3445 s->devices = QAPI_CLONE(strList, devices);
3447 job_start(&s->common);
3448 }
3450 void qmp_snapshot_delete(const char *job_id,
3451 const char *tag,
3452 strList *devices,
3453 Error **errp)
3454 {
3455 SnapshotJob *s;
3457 s = job_create(job_id, &snapshot_delete_job_driver, NULL,
3458 qemu_get_aio_context(), JOB_MANUAL_DISMISS,
3459 NULL, NULL, errp);
3460 if (!s) {
3461 return;
3462 }
3464 s->tag = g_strdup(tag);
3465 s->devices = QAPI_CLONE(strList, devices);
3467 job_start(&s->common);
3468 }
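/*
 * Illustrative note (not part of the original file): driving the
 * job-based API above from QMP (tag and device names are examples):
 *
 *   { "execute": "snapshot-save",
 *     "arguments": { "job-id": "save0", "tag": "checkpoint-1",
 *                    "vmstate": "disk0", "devices": ["disk0"] } }
 *
 * Completion is reported through the job machinery (query-jobs /
 * JOB_STATUS_CHANGE), and the job must be cleaned up with "job-dismiss"
 * because the drivers above are created with JOB_MANUAL_DISMISS.
 */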