 * Copyright IBM, Corp. 2008
 *
 * Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
#include "qemu/sockets.h"
#include "migration/block.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "qapi/util.h"
#include "qapi-event.h"
#define MAX_THROTTLE  (32 << 20)      /* Migration speed throttling */
/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
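
/* For example: with BUFFER_DELAY of 100 ms, XFER_LIMIT_RATIO is 10, so
 * each throttling window may transfer roughly
 * bandwidth_limit / XFER_LIMIT_RATIO bytes; a 32 MiB/s limit allows
 * about 3.2 MiB per 100 ms window. */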
/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression. */
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: means no compression, 1: best speed, ..., 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
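
/* For example: if decompression runs roughly four times as fast as
 * compression, two decompression threads on the destination can keep
 * pace with the eight compression threads used by default on the
 * source. */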
/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;
/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */
MigrationState *migrate_get_current(void)
{
    static MigrationState current_migration = {
        .state = MIGRATION_STATUS_NONE,
        .bandwidth_limit = MAX_THROTTLE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
        .parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] =
                DEFAULT_MIGRATE_COMPRESS_LEVEL,
        .parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
                DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
        .parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
    };

    return &current_migration;
}
static MigrationIncomingState *mis_current;
MigrationIncomingState *migration_incoming_get_current(void)
{
    return mis_current;
}
MigrationIncomingState *migration_incoming_state_new(QEMUFile *f)
{
    mis_current = g_malloc0(sizeof(MigrationIncomingState));
    mis_current->file = f;
    QLIST_INIT(&mis_current->loadvm_handlers);

    return mis_current;
}
void migration_incoming_state_destroy(void)
{
    loadvm_free_handlers(mis_current);
    g_free(mis_current);
    mis_current = NULL;
}
typedef struct {
    bool optional;
    uint32_t size;
    uint8_t runstate[100];
    RunState state;
    bool received;
} GlobalState;

static GlobalState global_state;
int global_state_store(void)
{
    if (!runstate_store((char *)global_state.runstate,
                        sizeof(global_state.runstate))) {
        error_report("runstate name too big: %s", global_state.runstate);
        trace_migrate_state_too_big();
        return -EINVAL;
    }
    return 0;
}
void global_state_store_running(void)
{
    const char *state = RunState_lookup[RUN_STATE_RUNNING];
    strncpy((char *)global_state.runstate,
            state, sizeof(global_state.runstate));
}
static bool global_state_received(void)
{
    return global_state.received;
}
static RunState global_state_get_runstate(void)
{
    return global_state.state;
}
void global_state_set_optional(void)
{
    global_state.optional = true;
}
static bool global_state_needed(void *opaque)
{
    GlobalState *s = opaque;
    char *runstate = (char *)s->runstate;

    /* If it is not optional, it is mandatory */
    if (s->optional == false) {
        return true;
    }

    /* If state is running or paused, it is not needed */
    if (strcmp(runstate, "running") == 0 ||
        strcmp(runstate, "paused") == 0) {
        return false;
    }

    /* for any other state it is needed */
    return true;
}
static int global_state_post_load(void *opaque, int version_id)
{
    GlobalState *s = opaque;
    Error *local_err = NULL;
    int r;
    char *runstate = (char *)s->runstate;

    s->received = true;
    trace_migrate_global_state_post_load(runstate);

    r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE_MAX,
                        -1, &local_err);
    if (r == -1) {
        if (local_err) {
            error_report_err(local_err);
        }
        return -EINVAL;
    }
    s->state = r;

    return 0;
}
static void global_state_pre_save(void *opaque)
{
    GlobalState *s = opaque;

    trace_migrate_global_state_pre_save((char *)s->runstate);
    s->size = strlen((char *)s->runstate) + 1;
}
static const VMStateDescription vmstate_globalstate = {
    .name = "globalstate",
    .minimum_version_id = 1,
    .post_load = global_state_post_load,
    .pre_save = global_state_pre_save,
    .needed = global_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(size, GlobalState),
        VMSTATE_BUFFER(runstate, GlobalState),
        VMSTATE_END_OF_LIST()
    },
};
void register_global_state(void)
{
    /* We will use it independently of whether we receive it */
    strcpy((char *)&global_state.runstate, "");
    global_state.received = false;
    vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
}
static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state, &error_abort);
    }
}
/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    deferred_incoming = true;
}
void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}
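
/* For example, the destination side of a migration is typically
 * started with one of:
 *   -incoming tcp:0:4444          (listen on TCP port 4444)
 *   -incoming unix:/tmp/mig.sock  (listen on a UNIX socket)
 *   -incoming fd:42               (read from an already-open fd)
 *   -incoming defer               (wait for migrate_incoming)
 * The URI prefix selects the corresponding handler above. */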
static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    Error *local_err = NULL;
    int ret;

    migration_incoming_state_new(f);
    migrate_generate_event(MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(f);

    qemu_fclose(f);
    free_xbzrle_decoded_buf();
    migration_incoming_state_destroy();

    if (ret < 0) {
        migrate_generate_event(MIGRATION_STATUS_FAILED);
        error_report("load of migration failed: %s", strerror(-ret));
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }
    migrate_generate_event(MIGRATION_STATUS_COMPLETED);
    qemu_announce_self();

    /* Make sure all file formats flush their mutable metadata */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        error_report_err(local_err);
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    /* If global state section was not received or we are in running
       state, we need to obey autostart. Any other state is set with
       runstate_set. */
    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else {
        runstate_set(global_state_get_runstate());
    }
    migrate_decompress_threads_join();
}
void process_incoming_migration(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
    int fd = qemu_get_fd(f);

    migrate_decompress_threads_create();
    qemu_set_nonblock(fd);
    qemu_coroutine_enter(co, f);
}
/* Maximum amount of time, in nanoseconds, that we are willing to have
 * the guest paused (downtime) at the end of migration.  Nanoseconds
 * are used because that is the maximum resolution get_clock() can
 * achieve.  This is an internal measure; all user-visible units must
 * be in seconds. */
static uint64_t max_downtime = 300000000;
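
/* For example: the default of 300000000 ns corresponds to 300 ms of
 * allowed downtime; code that needs milliseconds (e.g. the
 * expected_downtime estimate in migrate_fd_connect()) divides this
 * value by 1000000. */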
uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}
MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) {
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value =
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}
MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    params->compress_threads =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    params->decompress_threads =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];

    return params;
}
static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}
MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = 0;
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}
void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;

    if (s->state == MIGRATION_STATUS_ACTIVE ||
        s->state == MIGRATION_STATUS_SETUP) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }
}
void qmp_migrate_set_parameters(bool has_compress_level,
                                int64_t compress_level,
                                bool has_compress_threads,
                                int64_t compress_threads,
                                bool has_decompress_threads,
                                int64_t decompress_threads, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (has_compress_level && (compress_level < 0 || compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return;
    }
    if (has_compress_threads &&
            (compress_threads < 1 || compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (has_decompress_threads &&
            (decompress_threads < 1 || decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }

    if (has_compress_level) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    }
    if (has_compress_threads) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads;
    }
    if (has_decompress_threads) {
        s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                                                    decompress_threads;
    }
}
/* shared migration helpers */
static void migrate_set_state(MigrationState *s, int old_state, int new_state)
{
    if (atomic_cmpxchg(&s->state, old_state, new_state) == old_state) {
        trace_migrate_set_state(new_state);
        migrate_generate_event(new_state);
    }
}
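
/* Note: the compare-and-swap above means the transition only takes
 * effect if the state is still old_state.  For example, if a cancel
 * request has already moved the state to MIGRATION_STATUS_CANCELLING,
 * a concurrent attempt to move ACTIVE -> COMPLETED is silently dropped
 * instead of overwriting the cancellation. */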
static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    if (s->file) {
        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        qemu_thread_join(&s->thread);
        qemu_mutex_lock_iothread();

        migrate_compress_threads_join();
        qemu_fclose(s->file);
        s->file = NULL;
    }

    assert(s->state != MIGRATION_STATUS_ACTIVE);

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        qemu_savevm_state_cancel();
        if (s->state == MIGRATION_STATUS_CANCELLING) {
            migrate_set_state(s, MIGRATION_STATUS_CANCELLING,
                              MIGRATION_STATUS_CANCELLED);
        }
    }

    notifier_list_notify(&migration_state_notifiers, s);
}
void migrate_fd_error(MigrationState *s)
{
    trace_migrate_fd_error();
    assert(s->file == NULL);
    migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED);
    notifier_list_notify(&migration_state_notifiers, s);
}
static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->file;
    trace_migrate_fd_cancel();

    do {
        old_state = s->state;
        if (old_state != MIGRATION_STATUS_SETUP &&
            old_state != MIGRATION_STATUS_ACTIVE) {
            break;
        }
        migrate_set_state(s, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
}
void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}
void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}
bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}
bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}
bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}
static MigrationState *migrate_init(const MigrationParams *params)
{
    MigrationState *s = migrate_get_current();
    int64_t bandwidth_limit = s->bandwidth_limit;
    bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
    int64_t xbzrle_cache_size = s->xbzrle_cache_size;
    int compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    int compress_thread_count =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    int decompress_thread_count =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];

    memcpy(enabled_capabilities, s->enabled_capabilities,
           sizeof(enabled_capabilities));

    memset(s, 0, sizeof(*s));
    memcpy(s->enabled_capabilities, enabled_capabilities,
           sizeof(enabled_capabilities));
    s->xbzrle_cache_size = xbzrle_cache_size;

    s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
               compress_thread_count;
    s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
               decompress_thread_count;
    s->bandwidth_limit = bandwidth_limit;
    migrate_set_state(s, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    return s;
}
static GSList *migration_blockers;
void migrate_add_blocker(Error *reason)
{
    migration_blockers = g_slist_prepend(migration_blockers, reason);
}
void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}
void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}
void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    MigrationParams params;
    const char *p;

    params.blk = has_blk && blk;
    params.shared = has_inc && inc;

    if (s->state == MIGRATION_STATUS_ACTIVE ||
        s->state == MIGRATION_STATUS_SETUP ||
        s->state == MIGRATION_STATUS_CANCELLING) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return;
    }

    if (qemu_savevm_state_blocked(errp)) {
        return;
    }

    if (migration_blockers) {
        *errp = error_copy(migration_blockers->data);
        return;
    }

    /* We are starting a new migration, so we want to start in a clean
       state.  This change is only needed if the previous migration
       failed/was cancelled.  We don't use migrate_set_state() because
       we are setting the initial state, not changing it. */
    s->state = MIGRATION_STATUS_NONE;

    s = migrate_init(&params);

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED);
        return;
    }

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
}
void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}
void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}
int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}
void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationState *s;

    if (value > SIZE_MAX) {
        value = SIZE_MAX;
    }

    s = migrate_get_current();
    s->bandwidth_limit = value;
    if (s->file) {
        qemu_file_set_rate_limit(s->file, s->bandwidth_limit / XFER_LIMIT_RATIO);
    }
}
void qmp_migrate_set_downtime(double value, Error **errp)
{
    value *= 1e9;
    value = MAX(0, MIN(UINT64_MAX, value));
    max_downtime = (uint64_t)value;
}
bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}
bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}
bool migrate_use_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
}
int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
}
int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
}
int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
}
bool migrate_use_events(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
}
int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}
int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}
/* migration thread support */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    int64_t max_size = 0;
    int64_t start_time = initial_time;
    bool old_vm_running = false;

    rcu_register_thread();

    qemu_savevm_state_header(s->file);
    qemu_savevm_state_begin(s->file, &s->params);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_ACTIVE);

    while (s->state == MIGRATION_STATUS_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->file)) {
            pending_size = qemu_savevm_state_pending(s->file, max_size);
            trace_migrate_pending(pending_size, max_size);
            if (pending_size && pending_size >= max_size) {
                qemu_savevm_state_iterate(s->file);
            } else {
                int ret;

                qemu_mutex_lock_iothread();
                start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
                qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
                old_vm_running = runstate_is_running();

                ret = global_state_store();
                if (!ret) {
                    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
                    if (ret >= 0) {
                        qemu_file_set_rate_limit(s->file, INT64_MAX);
                        qemu_savevm_state_complete(s->file);
                    }
                }
                qemu_mutex_unlock_iothread();

                if (ret < 0) {
                    migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                                      MIGRATION_STATUS_FAILED);
                    break;
                }

                if (!qemu_file_get_error(s->file)) {
                    migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                                      MIGRATION_STATUS_COMPLETED);
                    break;
                }
            }
        }

        if (qemu_file_get_error(s->file)) {
            migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                              MIGRATION_STATUS_FAILED);
            break;
        }
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
            uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = transferred_bytes / time_spent;
            max_size = bandwidth * migrate_max_downtime() / 1000000;

            s->mbps = time_spent ? (((double) transferred_bytes * 8.0) /
                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1;

            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, max_size);
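            /* For example: if 32 MiB were transferred in the last 100 ms
             * window, bandwidth is about 335544 bytes per ms.  With the
             * default 300 ms of allowed downtime (300000000 ns / 1000000),
             * max_size becomes roughly 335544 * 300, i.e. about 100 MB of
             * outstanding data that we believe can be sent within the
             * downtime window. */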
            /* if we haven't sent anything, we don't want to recalculate;
               10000 is a small enough number for our purposes */
            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
            }

            qemu_file_reset_rate_limit(s->file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->file);
        }
        if (qemu_file_rate_limit(s->file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time) * 1000);
        }
    }

    qemu_mutex_lock_iothread();
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        uint64_t transferred_bytes = qemu_ftell(s->file);
        s->total_time = end_time - s->total_time;
        s->downtime = end_time - start_time;
        if (s->total_time) {
            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) s->total_time)) / 1000;
        }
        runstate_set(RUN_STATE_POSTMIGRATE);
    } else {
        if (old_vm_running) {
            vm_start();
        }
    }
    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();

    rcu_unregister_thread();
    return NULL;
}
void migrate_fd_connect(MigrationState *s)
{
    /* This is a best first approximation: ns to ms */
    s->expected_downtime = max_downtime / 1000000;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_rate_limit(s->file,
                             s->bandwidth_limit / XFER_LIMIT_RATIO);

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    migrate_compress_threads_create();
    qemu_thread_create(&s->thread, "migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
}