/*
 * Copyright IBM, Corp. 2008
 *
 * Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "migration/migration.h"
#include "monitor/monitor.h"
#include "migration/qemu-file.h"
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qemu/sockets.h"
#include "migration/block.h"
#include "qemu/thread.h"
#include "qmp-commands.h"

//#define DEBUG_MIGRATION

#ifdef DEBUG_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
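
/* Migration runs through a small state machine; the states below are
 * referenced throughout this file. */
enum {
    MIG_STATE_ERROR = -1,
    MIG_STATE_SETUP,
    MIG_STATE_CANCELLED,
    MIG_STATE_ACTIVE,
    MIG_STATE_COMPLETED,
};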

#define MAX_THROTTLE  (32 << 20)      /* Migration speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
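
/* Worked example of the throttling arithmetic: with BUFFER_DELAY = 100 ms
 * there are XFER_LIMIT_RATIO = 10 chunks per second, so
 * buffered_set_rate_limit() and migrate_fd_connect() divide the bandwidth
 * limit (bytes/s) by 10 to obtain xfer_limit, the number of bytes that may
 * be sent per 100 ms chunk.  The default MAX_THROTTLE of 32 MiB/s therefore
 * allows about 3.2 MiB per chunk. */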

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */

MigrationState *migrate_get_current(void)
{
    static MigrationState current_migration = {
        .state = MIG_STATE_SETUP,
        .bandwidth_limit = MAX_THROTTLE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
    };

    return &current_migration;
}
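
/* Incoming migration is dispatched on the URI scheme: "tcp:", "exec:",
 * "unix:" and "fd:" each hand the remainder of the URI to the matching
 * transport's *_start_incoming_migration() helper. */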

void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    if (strstart(uri, "tcp:", &p))
        tcp_start_incoming_migration(p, errp);
    else if (strstart(uri, "exec:", &p))
        exec_start_incoming_migration(p, errp);
    else if (strstart(uri, "unix:", &p))
        unix_start_incoming_migration(p, errp);
    else if (strstart(uri, "fd:", &p))
        fd_start_incoming_migration(p, errp);
    else
        error_setg(errp, "unknown migration protocol: %s", uri);
}

static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    int ret;

    ret = qemu_loadvm_state(f);
    if (ret < 0) {
        fprintf(stderr, "load of migration failed\n");
        exit(0);
    }
    qemu_announce_self();
    DPRINTF("successfully loaded vm state\n");

    bdrv_clear_incoming_migration_all();
    /* Make sure all file formats flush their mutable metadata */
    bdrv_invalidate_cache_all();

    if (autostart) {
        vm_start();
    } else {
        runstate_set(RUN_STATE_PAUSED);
    }
}
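
/* The incoming stream is parsed inside a coroutine so that reads which
 * would block can yield back to the main loop; the underlying fd is
 * switched to non-blocking mode before the coroutine is entered. */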
void process_incoming_migration(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
    int fd = qemu_get_fd(f);

    socket_set_nonblock(fd);
    qemu_coroutine_enter(co, f);
}

/* Amount of nanoseconds we are willing to wait for migration downtime.
 * The choice of nanoseconds is because it is the maximum resolution that
 * get_clock() can achieve.  It is an internal measure.  All user-visible
 * units must be in seconds. */
static uint64_t max_downtime = 30000000;   /* 30 ms */

uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) {
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value = g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIG_STATE_SETUP:
        /* no migration has happened ever */
        break;
    case MIG_STATE_ACTIVE:
        info->has_status = true;
        info->status = g_strdup("active");
        info->has_total_time = true;
        info->total_time = qemu_get_clock_ms(rt_clock) - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIG_STATE_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->status = g_strdup("completed");
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = 0;
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        break;
    case MIG_STATE_ERROR:
        info->has_status = true;
        info->status = g_strdup("failed");
        break;
    case MIG_STATE_CANCELLED:
        info->has_status = true;
        info->status = g_strdup("cancelled");
        break;
    }

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;

    if (s->state == MIG_STATE_ACTIVE) {
        error_set(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }
}

/* shared migration helpers */
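
/* migrate_fd_error() and migrate_fd_completed() move the state machine to
 * its terminal states and wake everyone registered on
 * migration_state_notifiers; migrate_fd_cleanup() closes the outgoing
 * QEMUFile. */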

static int migrate_fd_cleanup(MigrationState *s)
{
    int ret = 0;

    if (s->file) {
        DPRINTF("closing file\n");
        ret = qemu_fclose(s->file);
        s->file = NULL;
    }

    return ret;
}

void migrate_fd_error(MigrationState *s)
{
    DPRINTF("setting error state\n");
    s->state = MIG_STATE_ERROR;
    notifier_list_notify(&migration_state_notifiers, s);
    migrate_fd_cleanup(s);
}

static void migrate_fd_completed(MigrationState *s)
{
    DPRINTF("setting completed state\n");
    if (migrate_fd_cleanup(s) < 0) {
        s->state = MIG_STATE_ERROR;
    } else {
        s->state = MIG_STATE_COMPLETED;
        runstate_set(RUN_STATE_POSTMIGRATE);
    }
    notifier_list_notify(&migration_state_notifiers, s);
}

static ssize_t migrate_fd_put_buffer(MigrationState *s, const void *data,
                                     size_t size)
{
    ssize_t ret;

    if (s->state != MIG_STATE_ACTIVE) {
        return -EIO;
    }

    do {
        ret = s->write(s, data, size);
    } while (ret == -1 && ((s->get_error(s)) == EINTR));   /* retry on EINTR */

    if (ret == -1) {
        ret = -(s->get_error(s));
    }

    return ret;
}

static void migrate_fd_cancel(MigrationState *s)
{
    if (s->state != MIG_STATE_ACTIVE)
        return;

    DPRINTF("cancelling migration\n");

    s->state = MIG_STATE_CANCELLED;
    notifier_list_notify(&migration_state_notifiers, s);
    qemu_savevm_state_cancel();

    migrate_fd_cleanup(s);
}

int migrate_fd_close(MigrationState *s)
{
    int rc = 0;

    /* Close the underlying transport via the registered close callback. */
    if (s->fd != -1) {
        rc = s->close(s);
        s->fd = -1;
    }
    return rc;
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_is_active(MigrationState *s)
{
    return s->state == MIG_STATE_ACTIVE;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIG_STATE_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIG_STATE_CANCELLED ||
            s->state == MIG_STATE_ERROR);
}
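
/* migrate_init() clears the singleton MigrationState for a new run while
 * preserving the user-tunable settings (bandwidth limit, capabilities and
 * XBZRLE cache size) across the memset(). */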

static MigrationState *migrate_init(const MigrationParams *params)
{
    MigrationState *s = migrate_get_current();
    int64_t bandwidth_limit = s->bandwidth_limit;
    bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
    int64_t xbzrle_cache_size = s->xbzrle_cache_size;

    memcpy(enabled_capabilities, s->enabled_capabilities,
           sizeof(enabled_capabilities));

    memset(s, 0, sizeof(*s));
    s->bandwidth_limit = bandwidth_limit;
    s->params = *params;
    memcpy(s->enabled_capabilities, enabled_capabilities,
           sizeof(enabled_capabilities));
    s->xbzrle_cache_size = xbzrle_cache_size;

    s->bandwidth_limit = bandwidth_limit;
    s->state = MIG_STATE_SETUP;
    s->total_time = qemu_get_clock_ms(rt_clock);

    return s;
}

static GSList *migration_blockers;

void migrate_add_blocker(Error *reason)
{
    migration_blockers = g_slist_prepend(migration_blockers, reason);
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    MigrationParams params;
    const char *p;

    params.blk = blk;
    params.shared = inc;

    if (s->state == MIG_STATE_ACTIVE) {
        error_set(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    if (qemu_savevm_state_blocked(errp)) {
        return;
    }

    if (migration_blockers) {
        *errp = error_copy(migration_blockers->data);
        return;
    }

    s = migrate_init(&params);

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
    } else {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                  "a valid migration protocol");
        return;
    }

    if (local_err) {
        migrate_fd_error(s);
        error_propagate(errp, local_err);
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();

    /* Check for truncation */
    if (value != (size_t)value) {
        error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                  "exceeding address space");
        return;
    }

    s->xbzrle_cache_size = xbzrle_cache_resize(value);
}

int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationState *s;

    if (value < 0) {
        value = 0;
    }

    s = migrate_get_current();
    s->bandwidth_limit = value;
    qemu_file_set_rate_limit(s->file, s->bandwidth_limit);
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
    /* The user-visible value is in seconds; max_downtime is kept in ns. */
    value *= 1e9;
    value = MAX(0, MIN(UINT64_MAX, value));
    max_downtime = (uint64_t)value;
}

int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}

/* migration thread support */
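
/* Outgoing data is staged in s->buffer by buffered_put_buffer() and drained
 * by buffered_flush(), which sends at most xfer_limit bytes per BUFFER_DELAY
 * window; buffered_file_thread() drives the whole loop. */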

static ssize_t buffered_flush(MigrationState *s)
{
    size_t offset = 0;
    ssize_t ret = 0;

    DPRINTF("flushing %zu byte(s) of data\n", s->buffer_size);

    while (s->bytes_xfer < s->xfer_limit && offset < s->buffer_size) {
        size_t to_send = MIN(s->buffer_size - offset,
                             s->xfer_limit - s->bytes_xfer);
        ret = migrate_fd_put_buffer(s, s->buffer + offset, to_send);
        if (ret <= 0) {
            DPRINTF("error flushing data, %zd\n", ret);
            break;
        } else {
            DPRINTF("flushed %zd byte(s)\n", ret);
            offset += ret;
            s->bytes_xfer += ret;
        }
    }

    DPRINTF("flushed %zu of %zu byte(s)\n", offset, s->buffer_size);
    memmove(s->buffer, s->buffer + offset, s->buffer_size - offset);
    s->buffer_size -= offset;

    if (ret < 0) {
        return ret;
    }
    return offset;
}

static int buffered_put_buffer(void *opaque, const uint8_t *buf,
                               int64_t pos, int size)
{
    MigrationState *s = opaque;
    ssize_t error;

    DPRINTF("putting %d bytes at %" PRId64 "\n", size, pos);

    error = qemu_file_get_error(s->file);
    if (error) {
        DPRINTF("flush when error, bailing: %s\n", strerror(-error));
        return error;
    }

    if (size > (s->buffer_capacity - s->buffer_size)) {
        DPRINTF("increasing buffer capacity from %zu by %zu\n",
                s->buffer_capacity, size + 1024);

        s->buffer_capacity += size + 1024;

        s->buffer = g_realloc(s->buffer, s->buffer_capacity);
    }

    memcpy(s->buffer + s->buffer_size, buf, size);
    s->buffer_size += size;

    return size;
}

static int buffered_close(void *opaque)
{
    MigrationState *s = opaque;
    ssize_t ret = 0;
    int ret2;

    DPRINTF("closing\n");

    s->xfer_limit = INT_MAX;
    while (!qemu_file_get_error(s->file) && s->buffer_size) {
        ret = buffered_flush(s);
        if (ret < 0) {
            break;
        }
    }

    ret2 = migrate_fd_close(s);
    if (ret >= 0) {
        ret = ret2;
    }
    return ret;
}

static int buffered_get_fd(void *opaque)
{
    MigrationState *s = opaque;

    return s->fd;
}

/*
 * The meaning of the return values is:
 *   0: We can continue sending
 *   1: Time to stop
 *   negative: There has been an error
 */
static int buffered_rate_limit(void *opaque)
{
    MigrationState *s = opaque;
    int ret;

    ret = qemu_file_get_error(s->file);
    if (ret) {
        return ret;
    }

    if (s->bytes_xfer >= s->xfer_limit) {
        return 1;
    }

    return 0;
}

static int64_t buffered_set_rate_limit(void *opaque, int64_t new_rate)
{
    MigrationState *s = opaque;

    if (qemu_file_get_error(s->file)) {
        goto out;
    }
    if (new_rate > SIZE_MAX) {
        new_rate = SIZE_MAX;
    }

    s->xfer_limit = new_rate / XFER_LIMIT_RATIO;

out:
    return s->xfer_limit;
}

static int64_t buffered_get_rate_limit(void *opaque)
{
    MigrationState *s = opaque;

    return s->xfer_limit;
}
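
/* Main loop of the migration thread: call qemu_savevm_state_iterate() while
 * the estimated remaining data is above max_size, then stop the guest, send
 * the final state and record total_time/downtime.  The iothread lock is held
 * around the savevm calls, and bandwidth/max_size are re-estimated once per
 * BUFFER_DELAY window. */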
static void *buffered_file_thread(void *opaque)
{
    MigrationState *s = opaque;
    int64_t initial_time = qemu_get_clock_ms(rt_clock);
    int64_t sleep_time = 0;
    int64_t max_size = 0;
    bool last_round = false;
    int ret;

    qemu_mutex_lock_iothread();
    DPRINTF("beginning savevm\n");
    ret = qemu_savevm_state_begin(s->file, &s->params);
    if (ret < 0) {
        DPRINTF("failed, %d\n", ret);
        qemu_mutex_unlock_iothread();
        return NULL;
    }
    qemu_mutex_unlock_iothread();

    while (true) {
        int64_t current_time;
        uint64_t pending_size;

        qemu_mutex_lock_iothread();
        if (s->state != MIG_STATE_ACTIVE) {
            DPRINTF("put_ready returning because of non-active state\n");
            qemu_mutex_unlock_iothread();
            break;
        }
        if (last_round) {
            /* the final round has already been sent; nothing left to do */
            qemu_mutex_unlock_iothread();
            break;
        }
        if (s->bytes_xfer < s->xfer_limit) {
            DPRINTF("iterate\n");
            pending_size = qemu_savevm_state_pending(s->file, max_size);
            DPRINTF("pending size %lu max %lu\n", pending_size, max_size);
            if (pending_size && pending_size >= max_size) {
                ret = qemu_savevm_state_iterate(s->file);
                if (ret < 0) {
                    qemu_mutex_unlock_iothread();
                    break;
                }
            } else {
                int old_vm_running = runstate_is_running();
                int64_t start_time, end_time;

                DPRINTF("done iterating\n");
                start_time = qemu_get_clock_ms(rt_clock);
                qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
                if (old_vm_running) {
                    vm_stop(RUN_STATE_FINISH_MIGRATE);
                } else {
                    vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
                }
                ret = qemu_savevm_state_complete(s->file);
                if (ret < 0) {
                    qemu_mutex_unlock_iothread();
                    break;
                } else {
                    migrate_fd_completed(s);
                }
                end_time = qemu_get_clock_ms(rt_clock);
                s->total_time = end_time - s->total_time;
                s->downtime = end_time - start_time;
                if (s->state != MIG_STATE_COMPLETED) {
                    /* completion failed: resume the guest if it was running */
                    if (old_vm_running) {
                        vm_start();
                    }
                }
                last_round = true;
            }
        }
        qemu_mutex_unlock_iothread();
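
        /* Once at least BUFFER_DELAY ms have passed, re-estimate the
         * bandwidth (bytes per millisecond of active transfer time) and,
         * from it, max_size: the amount of data that can still be sent
         * within the configured max_downtime. */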
        current_time = qemu_get_clock_ms(rt_clock);
        if (current_time >= initial_time + BUFFER_DELAY) {
            uint64_t transferred_bytes = s->bytes_xfer;
            uint64_t time_spent = current_time - initial_time - sleep_time;
            double bandwidth = transferred_bytes / time_spent;
            max_size = bandwidth * migrate_max_downtime() / 1000000;

            DPRINTF("transferred %" PRIu64 " time_spent %" PRIu64
                    " bandwidth %g max_size %" PRId64 "\n",
                    transferred_bytes, time_spent, bandwidth, max_size);
            /* if we haven't sent anything, we don't want to recalculate
               10000 is a small enough number for our purposes */
            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
            }

            s->bytes_xfer = 0;
            sleep_time = 0;
            initial_time = current_time;
        }
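
        /* If this chunk's quota is already used up and the final round has
         * not been sent, sleep out the remainder of the 100 ms window and
         * remember the idle time so it is not counted as transfer time in
         * the next bandwidth estimate. */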
        if (!last_round && (s->bytes_xfer >= s->xfer_limit)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time) * 1000);
            sleep_time += qemu_get_clock_ms(rt_clock) - current_time;
        }
        ret = buffered_flush(s);
        if (ret < 0) {
            break;
        }
    }

    return NULL;
}

static const QEMUFileOps buffered_file_ops = {
    .get_fd =         buffered_get_fd,
    .put_buffer =     buffered_put_buffer,
    .close =          buffered_close,
    .rate_limit =     buffered_rate_limit,
    .get_rate_limit = buffered_get_rate_limit,
    .set_rate_limit = buffered_set_rate_limit,
};
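
/* migrate_fd_connect() is called by the transport-specific code once the
 * outgoing connection is established: it wraps the transport in a buffered
 * QEMUFile using the ops above and starts the detached migration thread. */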
void migrate_fd_connect(MigrationState *s)
{
    s->state = MIG_STATE_ACTIVE;
    s->bytes_xfer = 0;
    s->buffer = NULL;
    s->buffer_size = 0;
    s->buffer_capacity = 0;
    /* This is a best first approximation: ns to ms */
    s->expected_downtime = max_downtime/1000000;

    s->xfer_limit = s->bandwidth_limit / XFER_LIMIT_RATIO;

    s->file = qemu_fopen_ops(s, &buffered_file_ops);

    qemu_thread_create(&s->thread, buffered_file_thread, s,
                       QEMU_THREAD_DETACHED);
    notifier_list_notify(&migration_state_notifiers, s);
}