4 * Copyright IBM, Corp. 2008
7 * Anthony Liguori <aliguori@us.ibm.com>
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
12 * Contributions after 2012-01-13 are licensed under the terms of the
13 * GNU GPL, version 2 or (at your option) any later version.
16 #include "qemu-common.h"
17 #include "migration/migration.h"
18 #include "monitor/monitor.h"
19 #include "migration/qemu-file.h"
20 #include "sysemu/sysemu.h"
21 #include "block/block.h"
22 #include "qemu/sockets.h"
23 #include "migration/block.h"
24 #include "qemu/thread.h"
25 #include "qmp-commands.h"
//#define DEBUG_MIGRATION

#ifdef DEBUG_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("migration: " fmt, ## __VA_ARGS__); } while (0)
#else
/* Compiled out when DEBUG_MIGRATION is not defined. */
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define MAX_THROTTLE  (32 << 20)      /* Migration speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
55 static NotifierList migration_state_notifiers
=
56 NOTIFIER_LIST_INITIALIZER(migration_state_notifiers
);
58 /* When we add fault tolerance, we could have several
59 migrations at once. For now we don't need to add
60 dynamic creation of migration */
62 MigrationState
*migrate_get_current(void)
64 static MigrationState current_migration
= {
65 .state
= MIG_STATE_SETUP
,
66 .bandwidth_limit
= MAX_THROTTLE
,
67 .xbzrle_cache_size
= DEFAULT_MIGRATE_CACHE_SIZE
,
70 return ¤t_migration
;
73 void qemu_start_incoming_migration(const char *uri
, Error
**errp
)
77 if (strstart(uri
, "tcp:", &p
))
78 tcp_start_incoming_migration(p
, errp
);
80 else if (strstart(uri
, "exec:", &p
))
81 exec_start_incoming_migration(p
, errp
);
82 else if (strstart(uri
, "unix:", &p
))
83 unix_start_incoming_migration(p
, errp
);
84 else if (strstart(uri
, "fd:", &p
))
85 fd_start_incoming_migration(p
, errp
);
88 error_setg(errp
, "unknown migration protocol: %s", uri
);
92 static void process_incoming_migration_co(void *opaque
)
97 ret
= qemu_loadvm_state(f
);
100 fprintf(stderr
, "load of migration failed\n");
103 qemu_announce_self();
104 DPRINTF("successfully loaded vm state\n");
106 bdrv_clear_incoming_migration_all();
107 /* Make sure all file formats flush their mutable metadata */
108 bdrv_invalidate_cache_all();
113 runstate_set(RUN_STATE_PAUSED
);
117 void process_incoming_migration(QEMUFile
*f
)
119 Coroutine
*co
= qemu_coroutine_create(process_incoming_migration_co
);
120 int fd
= qemu_get_fd(f
);
123 socket_set_nonblock(fd
);
124 qemu_coroutine_enter(co
, f
);
/* amount of nanoseconds we are willing to wait for migration to be down.
 * the choice of nanoseconds is because it is the maximum resolution that
 * get_clock() can achieve. It is an internal measure. All user-visible
 * units must be in seconds */
static uint64_t max_downtime = 30000000;

/* Return the maximum tolerated downtime, in nanoseconds. */
uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}
138 MigrationCapabilityStatusList
*qmp_query_migrate_capabilities(Error
**errp
)
140 MigrationCapabilityStatusList
*head
= NULL
;
141 MigrationCapabilityStatusList
*caps
;
142 MigrationState
*s
= migrate_get_current();
145 for (i
= 0; i
< MIGRATION_CAPABILITY_MAX
; i
++) {
147 head
= g_malloc0(sizeof(*caps
));
150 caps
->next
= g_malloc0(sizeof(*caps
));
154 g_malloc(sizeof(*caps
->value
));
155 caps
->value
->capability
= i
;
156 caps
->value
->state
= s
->enabled_capabilities
[i
];
162 static void get_xbzrle_cache_stats(MigrationInfo
*info
)
164 if (migrate_use_xbzrle()) {
165 info
->has_xbzrle_cache
= true;
166 info
->xbzrle_cache
= g_malloc0(sizeof(*info
->xbzrle_cache
));
167 info
->xbzrle_cache
->cache_size
= migrate_xbzrle_cache_size();
168 info
->xbzrle_cache
->bytes
= xbzrle_mig_bytes_transferred();
169 info
->xbzrle_cache
->pages
= xbzrle_mig_pages_transferred();
170 info
->xbzrle_cache
->cache_miss
= xbzrle_mig_pages_cache_miss();
171 info
->xbzrle_cache
->overflow
= xbzrle_mig_pages_overflow();
175 MigrationInfo
*qmp_query_migrate(Error
**errp
)
177 MigrationInfo
*info
= g_malloc0(sizeof(*info
));
178 MigrationState
*s
= migrate_get_current();
181 case MIG_STATE_SETUP
:
182 /* no migration has happened ever */
184 case MIG_STATE_ACTIVE
:
185 info
->has_status
= true;
186 info
->status
= g_strdup("active");
187 info
->has_total_time
= true;
188 info
->total_time
= qemu_get_clock_ms(rt_clock
)
190 info
->has_expected_downtime
= true;
191 info
->expected_downtime
= s
->expected_downtime
;
193 info
->has_ram
= true;
194 info
->ram
= g_malloc0(sizeof(*info
->ram
));
195 info
->ram
->transferred
= ram_bytes_transferred();
196 info
->ram
->remaining
= ram_bytes_remaining();
197 info
->ram
->total
= ram_bytes_total();
198 info
->ram
->duplicate
= dup_mig_pages_transferred();
199 info
->ram
->normal
= norm_mig_pages_transferred();
200 info
->ram
->normal_bytes
= norm_mig_bytes_transferred();
201 info
->ram
->dirty_pages_rate
= s
->dirty_pages_rate
;
204 if (blk_mig_active()) {
205 info
->has_disk
= true;
206 info
->disk
= g_malloc0(sizeof(*info
->disk
));
207 info
->disk
->transferred
= blk_mig_bytes_transferred();
208 info
->disk
->remaining
= blk_mig_bytes_remaining();
209 info
->disk
->total
= blk_mig_bytes_total();
212 get_xbzrle_cache_stats(info
);
214 case MIG_STATE_COMPLETED
:
215 get_xbzrle_cache_stats(info
);
217 info
->has_status
= true;
218 info
->status
= g_strdup("completed");
219 info
->total_time
= s
->total_time
;
220 info
->has_downtime
= true;
221 info
->downtime
= s
->downtime
;
223 info
->has_ram
= true;
224 info
->ram
= g_malloc0(sizeof(*info
->ram
));
225 info
->ram
->transferred
= ram_bytes_transferred();
226 info
->ram
->remaining
= 0;
227 info
->ram
->total
= ram_bytes_total();
228 info
->ram
->duplicate
= dup_mig_pages_transferred();
229 info
->ram
->normal
= norm_mig_pages_transferred();
230 info
->ram
->normal_bytes
= norm_mig_bytes_transferred();
232 case MIG_STATE_ERROR
:
233 info
->has_status
= true;
234 info
->status
= g_strdup("failed");
236 case MIG_STATE_CANCELLED
:
237 info
->has_status
= true;
238 info
->status
= g_strdup("cancelled");
245 void qmp_migrate_set_capabilities(MigrationCapabilityStatusList
*params
,
248 MigrationState
*s
= migrate_get_current();
249 MigrationCapabilityStatusList
*cap
;
251 if (s
->state
== MIG_STATE_ACTIVE
) {
252 error_set(errp
, QERR_MIGRATION_ACTIVE
);
256 for (cap
= params
; cap
; cap
= cap
->next
) {
257 s
->enabled_capabilities
[cap
->value
->capability
] = cap
->value
->state
;
261 /* shared migration helpers */
263 static void migrate_fd_cleanup(MigrationState
*s
)
268 DPRINTF("closing file\n");
269 ret
= qemu_fclose(s
->file
);
274 if (ret
< 0 && s
->state
== MIG_STATE_ACTIVE
) {
275 s
->state
= MIG_STATE_ERROR
;
278 if (s
->state
!= MIG_STATE_ACTIVE
) {
279 qemu_savevm_state_cancel();
283 void migrate_fd_error(MigrationState
*s
)
285 DPRINTF("setting error state\n");
286 s
->state
= MIG_STATE_ERROR
;
287 notifier_list_notify(&migration_state_notifiers
, s
);
288 migrate_fd_cleanup(s
);
291 static void migrate_fd_completed(MigrationState
*s
)
293 DPRINTF("setting completed state\n");
294 migrate_fd_cleanup(s
);
295 if (s
->state
== MIG_STATE_ACTIVE
) {
296 s
->state
= MIG_STATE_COMPLETED
;
297 runstate_set(RUN_STATE_POSTMIGRATE
);
299 notifier_list_notify(&migration_state_notifiers
, s
);
302 static ssize_t
migrate_fd_put_buffer(MigrationState
*s
, const void *data
,
307 if (s
->state
!= MIG_STATE_ACTIVE
) {
312 ret
= s
->write(s
, data
, size
);
313 } while (ret
== -1 && ((s
->get_error(s
)) == EINTR
));
316 ret
= -(s
->get_error(s
));
321 static void migrate_fd_cancel(MigrationState
*s
)
323 if (s
->state
!= MIG_STATE_ACTIVE
)
326 DPRINTF("cancelling migration\n");
328 s
->state
= MIG_STATE_CANCELLED
;
329 notifier_list_notify(&migration_state_notifiers
, s
);
331 migrate_fd_cleanup(s
);
334 int migrate_fd_close(MigrationState
*s
)
344 void add_migration_state_change_notifier(Notifier
*notify
)
346 notifier_list_add(&migration_state_notifiers
, notify
);
349 void remove_migration_state_change_notifier(Notifier
*notify
)
351 notifier_remove(notify
);
354 bool migration_is_active(MigrationState
*s
)
356 return s
->state
== MIG_STATE_ACTIVE
;
359 bool migration_has_finished(MigrationState
*s
)
361 return s
->state
== MIG_STATE_COMPLETED
;
364 bool migration_has_failed(MigrationState
*s
)
366 return (s
->state
== MIG_STATE_CANCELLED
||
367 s
->state
== MIG_STATE_ERROR
);
370 static MigrationState
*migrate_init(const MigrationParams
*params
)
372 MigrationState
*s
= migrate_get_current();
373 int64_t bandwidth_limit
= s
->bandwidth_limit
;
374 bool enabled_capabilities
[MIGRATION_CAPABILITY_MAX
];
375 int64_t xbzrle_cache_size
= s
->xbzrle_cache_size
;
377 memcpy(enabled_capabilities
, s
->enabled_capabilities
,
378 sizeof(enabled_capabilities
));
380 memset(s
, 0, sizeof(*s
));
381 s
->bandwidth_limit
= bandwidth_limit
;
383 memcpy(s
->enabled_capabilities
, enabled_capabilities
,
384 sizeof(enabled_capabilities
));
385 s
->xbzrle_cache_size
= xbzrle_cache_size
;
387 s
->bandwidth_limit
= bandwidth_limit
;
388 s
->state
= MIG_STATE_SETUP
;
389 s
->total_time
= qemu_get_clock_ms(rt_clock
);
394 static GSList
*migration_blockers
;
396 void migrate_add_blocker(Error
*reason
)
398 migration_blockers
= g_slist_prepend(migration_blockers
, reason
);
401 void migrate_del_blocker(Error
*reason
)
403 migration_blockers
= g_slist_remove(migration_blockers
, reason
);
406 void qmp_migrate(const char *uri
, bool has_blk
, bool blk
,
407 bool has_inc
, bool inc
, bool has_detach
, bool detach
,
410 Error
*local_err
= NULL
;
411 MigrationState
*s
= migrate_get_current();
412 MigrationParams params
;
418 if (s
->state
== MIG_STATE_ACTIVE
) {
419 error_set(errp
, QERR_MIGRATION_ACTIVE
);
423 if (qemu_savevm_state_blocked(errp
)) {
427 if (migration_blockers
) {
428 *errp
= error_copy(migration_blockers
->data
);
432 s
= migrate_init(¶ms
);
434 if (strstart(uri
, "tcp:", &p
)) {
435 tcp_start_outgoing_migration(s
, p
, &local_err
);
437 } else if (strstart(uri
, "exec:", &p
)) {
438 exec_start_outgoing_migration(s
, p
, &local_err
);
439 } else if (strstart(uri
, "unix:", &p
)) {
440 unix_start_outgoing_migration(s
, p
, &local_err
);
441 } else if (strstart(uri
, "fd:", &p
)) {
442 fd_start_outgoing_migration(s
, p
, &local_err
);
445 error_set(errp
, QERR_INVALID_PARAMETER_VALUE
, "uri", "a valid migration protocol");
451 error_propagate(errp
, local_err
);
456 void qmp_migrate_cancel(Error
**errp
)
458 migrate_fd_cancel(migrate_get_current());
461 void qmp_migrate_set_cache_size(int64_t value
, Error
**errp
)
463 MigrationState
*s
= migrate_get_current();
465 /* Check for truncation */
466 if (value
!= (size_t)value
) {
467 error_set(errp
, QERR_INVALID_PARAMETER_VALUE
, "cache size",
468 "exceeding address space");
472 s
->xbzrle_cache_size
= xbzrle_cache_resize(value
);
475 int64_t qmp_query_migrate_cache_size(Error
**errp
)
477 return migrate_xbzrle_cache_size();
480 void qmp_migrate_set_speed(int64_t value
, Error
**errp
)
488 s
= migrate_get_current();
489 s
->bandwidth_limit
= value
;
490 qemu_file_set_rate_limit(s
->file
, s
->bandwidth_limit
);
493 void qmp_migrate_set_downtime(double value
, Error
**errp
)
496 value
= MAX(0, MIN(UINT64_MAX
, value
));
497 max_downtime
= (uint64_t)value
;
500 int migrate_use_xbzrle(void)
504 s
= migrate_get_current();
506 return s
->enabled_capabilities
[MIGRATION_CAPABILITY_XBZRLE
];
509 int64_t migrate_xbzrle_cache_size(void)
513 s
= migrate_get_current();
515 return s
->xbzrle_cache_size
;
518 /* migration thread support */
521 static ssize_t
buffered_flush(MigrationState
*s
)
526 DPRINTF("flushing %zu byte(s) of data\n", s
->buffer_size
);
528 while (s
->bytes_xfer
< s
->xfer_limit
&& offset
< s
->buffer_size
) {
529 size_t to_send
= MIN(s
->buffer_size
- offset
, s
->xfer_limit
- s
->bytes_xfer
);
530 ret
= migrate_fd_put_buffer(s
, s
->buffer
+ offset
, to_send
);
532 DPRINTF("error flushing data, %zd\n", ret
);
535 DPRINTF("flushed %zd byte(s)\n", ret
);
537 s
->bytes_xfer
+= ret
;
541 DPRINTF("flushed %zu of %zu byte(s)\n", offset
, s
->buffer_size
);
542 memmove(s
->buffer
, s
->buffer
+ offset
, s
->buffer_size
- offset
);
543 s
->buffer_size
-= offset
;
551 static int buffered_put_buffer(void *opaque
, const uint8_t *buf
,
552 int64_t pos
, int size
)
554 MigrationState
*s
= opaque
;
557 DPRINTF("putting %d bytes at %" PRId64
"\n", size
, pos
);
559 error
= qemu_file_get_error(s
->file
);
561 DPRINTF("flush when error, bailing: %s\n", strerror(-error
));
569 if (size
> (s
->buffer_capacity
- s
->buffer_size
)) {
570 DPRINTF("increasing buffer capacity from %zu by %zu\n",
571 s
->buffer_capacity
, size
+ 1024);
573 s
->buffer_capacity
+= size
+ 1024;
575 s
->buffer
= g_realloc(s
->buffer
, s
->buffer_capacity
);
578 memcpy(s
->buffer
+ s
->buffer_size
, buf
, size
);
579 s
->buffer_size
+= size
;
584 static int buffered_close(void *opaque
)
586 MigrationState
*s
= opaque
;
590 DPRINTF("closing\n");
592 s
->xfer_limit
= INT_MAX
;
593 while (!qemu_file_get_error(s
->file
) && s
->buffer_size
) {
594 ret
= buffered_flush(s
);
600 ret2
= migrate_fd_close(s
);
608 static int buffered_get_fd(void *opaque
)
610 MigrationState
*s
= opaque
;
616 * The meaning of the return values is:
617 * 0: We can continue sending
619 * negative: There has been an error
621 static int buffered_rate_limit(void *opaque
)
623 MigrationState
*s
= opaque
;
626 ret
= qemu_file_get_error(s
->file
);
631 if (s
->bytes_xfer
>= s
->xfer_limit
) {
638 static int64_t buffered_set_rate_limit(void *opaque
, int64_t new_rate
)
640 MigrationState
*s
= opaque
;
641 if (qemu_file_get_error(s
->file
)) {
644 if (new_rate
> SIZE_MAX
) {
648 s
->xfer_limit
= new_rate
/ XFER_LIMIT_RATIO
;
651 return s
->xfer_limit
;
654 static int64_t buffered_get_rate_limit(void *opaque
)
656 MigrationState
*s
= opaque
;
658 return s
->xfer_limit
;
661 static void *buffered_file_thread(void *opaque
)
663 MigrationState
*s
= opaque
;
664 int64_t initial_time
= qemu_get_clock_ms(rt_clock
);
665 int64_t sleep_time
= 0;
666 int64_t max_size
= 0;
667 bool last_round
= false;
670 qemu_mutex_lock_iothread();
671 DPRINTF("beginning savevm\n");
672 ret
= qemu_savevm_state_begin(s
->file
, &s
->params
);
673 qemu_mutex_unlock_iothread();
676 int64_t current_time
;
677 uint64_t pending_size
;
679 qemu_mutex_lock_iothread();
680 if (s
->state
!= MIG_STATE_ACTIVE
) {
681 DPRINTF("put_ready returning because of non-active state\n");
682 qemu_mutex_unlock_iothread();
686 qemu_mutex_unlock_iothread();
689 if (s
->bytes_xfer
< s
->xfer_limit
) {
690 DPRINTF("iterate\n");
691 pending_size
= qemu_savevm_state_pending(s
->file
, max_size
);
692 DPRINTF("pending size %lu max %lu\n", pending_size
, max_size
);
693 if (pending_size
&& pending_size
>= max_size
) {
694 ret
= qemu_savevm_state_iterate(s
->file
);
696 qemu_mutex_unlock_iothread();
700 int old_vm_running
= runstate_is_running();
701 int64_t start_time
, end_time
;
703 DPRINTF("done iterating\n");
704 start_time
= qemu_get_clock_ms(rt_clock
);
705 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER
);
706 vm_stop_force_state(RUN_STATE_FINISH_MIGRATE
);
707 ret
= qemu_savevm_state_complete(s
->file
);
709 qemu_mutex_unlock_iothread();
712 migrate_fd_completed(s
);
714 end_time
= qemu_get_clock_ms(rt_clock
);
715 s
->total_time
= end_time
- s
->total_time
;
716 s
->downtime
= end_time
- start_time
;
717 if (s
->state
!= MIG_STATE_COMPLETED
) {
718 if (old_vm_running
) {
725 qemu_mutex_unlock_iothread();
726 current_time
= qemu_get_clock_ms(rt_clock
);
727 if (current_time
>= initial_time
+ BUFFER_DELAY
) {
728 uint64_t transferred_bytes
= s
->bytes_xfer
;
729 uint64_t time_spent
= current_time
- initial_time
- sleep_time
;
730 double bandwidth
= transferred_bytes
/ time_spent
;
731 max_size
= bandwidth
* migrate_max_downtime() / 1000000;
733 DPRINTF("transferred %" PRIu64
" time_spent %" PRIu64
734 " bandwidth %g max_size %" PRId64
"\n",
735 transferred_bytes
, time_spent
, bandwidth
, max_size
);
736 /* if we haven't sent anything, we don't want to recalculate
737 10000 is a small enough number for our purposes */
738 if (s
->dirty_bytes_rate
&& transferred_bytes
> 10000) {
739 s
->expected_downtime
= s
->dirty_bytes_rate
/ bandwidth
;
744 initial_time
= current_time
;
746 if (!last_round
&& (s
->bytes_xfer
>= s
->xfer_limit
)) {
747 /* usleep expects microseconds */
748 g_usleep((initial_time
+ BUFFER_DELAY
- current_time
)*1000);
749 sleep_time
+= qemu_get_clock_ms(rt_clock
) - current_time
;
751 ret
= buffered_flush(s
);
761 static const QEMUFileOps buffered_file_ops
= {
762 .get_fd
= buffered_get_fd
,
763 .put_buffer
= buffered_put_buffer
,
764 .close
= buffered_close
,
765 .rate_limit
= buffered_rate_limit
,
766 .get_rate_limit
= buffered_get_rate_limit
,
767 .set_rate_limit
= buffered_set_rate_limit
,
770 void migrate_fd_connect(MigrationState
*s
)
772 s
->state
= MIG_STATE_ACTIVE
;
776 s
->buffer_capacity
= 0;
777 /* This is a best 1st approximation. ns to ms */
778 s
->expected_downtime
= max_downtime
/1000000;
780 s
->xfer_limit
= s
->bandwidth_limit
/ XFER_LIMIT_RATIO
;
783 s
->file
= qemu_fopen_ops(s
, &buffered_file_ops
);
785 qemu_thread_create(&s
->thread
, buffered_file_thread
, s
,
786 QEMU_THREAD_DETACHED
);
787 notifier_list_notify(&migration_state_notifiers
, s
);