/*
 * Copyright (c) 2019-2020 Red Hat Inc
 *
 * Juan Quintela <quintela@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "exec/target_page.h"
#include "sysemu/sysemu.h"
#include "exec/ramblock.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "migration.h"
#include "migration-stats.h"
#include "qemu-file.h"
#include "threadinfo.h"
#include "qemu/yank.h"
#include "io/channel-socket.h"
#include "yank_functions.h"

#define MULTIFD_MAGIC 0x11223344U
#define MULTIFD_VERSION 1
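
/*
 * Handshake message sent once on every multifd channel before any data
 * packets, so the destination can verify the connection comes from the
 * same source VM and match it to a channel id.
 */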
typedef struct {
    uint32_t magic;
    uint32_t version;
    unsigned char uuid[16]; /* QemuUUID */
    uint8_t id;
    uint8_t unused1[7];     /* Reserved for future use */
    uint64_t unused2[4];    /* Reserved for future use */
} __attribute__((packed)) MultiFDInit_t;
struct {
    MultiFDSendParams *params;
    /* array of pages to send */
    MultiFDPages_t *pages;
    /*
     * Global number of generated multifd packets.
     *
     * Note that we use 'uintptr_t' because it naturally supports atomic
     * operations on both 32-bit and 64-bit hosts.  It means that on 32-bit
     * systems multifd will overflow packet_num more easily, but that
     * should be fine.
     *
     * Another option would be QEMU's Stat64, which is 64 bits on all
     * hosts, but so far it does not support atomic fetch_add().
     * Keep it simple for now.
     */
    uintptr_t packet_num;
    /*
     * Synchronization point past which no more channels will be
     * created.
     */
    QemuSemaphore channels_created;
    /* send channels ready */
    QemuSemaphore channels_ready;
    /*
     * Whether we have already run the terminate-threads path.  There is
     * a race when an error arrives while we are exiting.
     * We use atomic operations; the only valid values are 0 and 1.
     */
    int exiting;
    /* multifd ops */
    MultiFDMethods *ops;
} *multifd_send_state;
/* Multifd without compression */

/**
 * nocomp_send_setup: setup send side
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int nocomp_send_setup(MultiFDSendParams *p, Error **errp)
{
    if (migrate_zero_copy_send()) {
        p->write_flags |= QIO_CHANNEL_WRITE_FLAG_ZERO_COPY;
    }

    return 0;
}
/**
 * nocomp_send_cleanup: cleanup send side
 *
 * For no compression this function does nothing.
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static void nocomp_send_cleanup(MultiFDSendParams *p, Error **errp)
{
}
/**
 * nocomp_send_prepare: prepare data to be able to send
 *
 * For no compression we just have to calculate the size of the
 * packet.
 *
 * Returns 0 for success or -1 for error
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int nocomp_send_prepare(MultiFDSendParams *p, Error **errp)
{
    bool use_zero_copy_send = migrate_zero_copy_send();
    MultiFDPages_t *pages = p->pages;
    int ret;

    if (!use_zero_copy_send) {
        /*
         * Only !zerocopy needs the header in IOV; zerocopy will
         * send it separately.
         */
        multifd_send_prepare_header(p);
    }

    for (int i = 0; i < pages->num; i++) {
        p->iov[p->iovs_num].iov_base = pages->block->host + pages->offset[i];
        p->iov[p->iovs_num].iov_len = p->page_size;
        p->iovs_num++;
    }

    p->next_packet_size = pages->num * p->page_size;
    p->flags |= MULTIFD_FLAG_NOCOMP;

    multifd_send_fill_packet(p);

    if (use_zero_copy_send) {
        /* Send header first, without zerocopy */
        ret = qio_channel_write_all(p->c, (void *)p->packet,
                                    p->packet_len, errp);
        if (ret != 0) {
            return -1;
        }
    }

    return 0;
}
/**
 * nocomp_recv_setup: setup receive side
 *
 * For no compression this function does nothing.
 *
 * Returns 0 for success or -1 for error
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int nocomp_recv_setup(MultiFDRecvParams *p, Error **errp)
{
    return 0;
}
/**
 * nocomp_recv_cleanup: cleanup receive side
 *
 * For no compression this function does nothing.
 *
 * @p: Params for the channel that we are using
 */
static void nocomp_recv_cleanup(MultiFDRecvParams *p)
{
}
/**
 * nocomp_recv_pages: read the data from the channel into actual pages
 *
 * For no compression we just need to read things into the correct place.
 *
 * Returns 0 for success or -1 for error
 *
 * @p: Params for the channel that we are using
 * @errp: pointer to an error
 */
static int nocomp_recv_pages(MultiFDRecvParams *p, Error **errp)
{
    uint32_t flags = p->flags & MULTIFD_FLAG_COMPRESSION_MASK;

    if (flags != MULTIFD_FLAG_NOCOMP) {
        error_setg(errp, "multifd %u: flags received %x flags expected %x",
                   p->id, flags, MULTIFD_FLAG_NOCOMP);
        return -1;
    }
    for (int i = 0; i < p->normal_num; i++) {
        p->iov[i].iov_base = p->host + p->normal[i];
        p->iov[i].iov_len = p->page_size;
    }
    return qio_channel_readv_all(p->c, p->iov, p->normal_num, errp);
}
static MultiFDMethods multifd_nocomp_ops = {
    .send_setup = nocomp_send_setup,
    .send_cleanup = nocomp_send_cleanup,
    .send_prepare = nocomp_send_prepare,
    .recv_setup = nocomp_recv_setup,
    .recv_cleanup = nocomp_recv_cleanup,
    .recv_pages = nocomp_recv_pages
};

static MultiFDMethods *multifd_ops[MULTIFD_COMPRESSION__MAX] = {
    [MULTIFD_COMPRESSION_NONE] = &multifd_nocomp_ops,
};
void multifd_register_ops(int method, MultiFDMethods *ops)
{
    assert(0 < method && method < MULTIFD_COMPRESSION__MAX);
    multifd_ops[method] = ops;
}
/* Reset a MultiFDPages_t* object for the next use */
static void multifd_pages_reset(MultiFDPages_t *pages)
{
    /*
     * We don't need to touch the offset[] array, because it will be
     * overwritten later when reused.
     */
    pages->num = 0;
    pages->block = NULL;
}
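
/*
 * Send the per-channel handshake message: magic, version, the source
 * VM's UUID and the channel id.  The destination validates it in
 * multifd_recv_initial_packet() before any data packets flow.
 */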
static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
{
    MultiFDInit_t msg = {};
    size_t size = sizeof(msg);
    int ret;

    msg.magic = cpu_to_be32(MULTIFD_MAGIC);
    msg.version = cpu_to_be32(MULTIFD_VERSION);
    msg.id = p->id;
    memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));

    ret = qio_channel_write_all(p->c, (char *)&msg, size, errp);
    if (ret != 0) {
        return -1;
    }
    stat64_add(&mig_stats.multifd_bytes, size);
    return 0;
}
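
/*
 * Read and validate the handshake message on a freshly accepted channel:
 * check magic, version and source UUID, and return the channel id (or a
 * negative value on error).
 */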
static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
{
    MultiFDInit_t msg;
    int ret;

    ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }

    msg.magic = be32_to_cpu(msg.magic);
    msg.version = be32_to_cpu(msg.version);

    if (msg.magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet magic %x "
                   "expected %x", msg.magic, MULTIFD_MAGIC);
        return -1;
    }

    if (msg.version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet version %u "
                   "expected %u", msg.version, MULTIFD_VERSION);
        return -1;
    }

    if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
        char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
        char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);

        error_setg(errp, "multifd: received uuid '%s' and expected "
                   "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
        g_free(uuid);
        g_free(msg_uuid);
        return -1;
    }

    if (msg.id > migrate_multifd_channels()) {
        error_setg(errp, "multifd: received channel id %u is greater than "
                   "number of channels %u", msg.id, migrate_multifd_channels());
        return -1;
    }

    return msg.id;
}
static MultiFDPages_t *multifd_pages_init(uint32_t n)
{
    MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);

    pages->allocated = n;
    pages->offset = g_new0(ram_addr_t, n);

    return pages;
}
static void multifd_pages_clear(MultiFDPages_t *pages)
{
    multifd_pages_reset(pages);
    pages->allocated = 0;
    g_free(pages->offset);
    pages->offset = NULL;
    g_free(pages);
}
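
/*
 * Fill the wire packet header from the channel state: flags, page counts,
 * next packet size, a freshly allocated packet number and the per-page
 * offsets, all converted to big endian.
 */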
void multifd_send_fill_packet(MultiFDSendParams *p)
{
    MultiFDPacket_t *packet = p->packet;
    MultiFDPages_t *pages = p->pages;
    uint64_t packet_num;
    int i;

    packet->flags = cpu_to_be32(p->flags);
    packet->pages_alloc = cpu_to_be32(p->pages->allocated);
    packet->normal_pages = cpu_to_be32(pages->num);
    packet->next_packet_size = cpu_to_be32(p->next_packet_size);

    packet_num = qatomic_fetch_inc(&multifd_send_state->packet_num);
    packet->packet_num = cpu_to_be64(packet_num);

    if (pages->block) {
        strncpy(packet->ramblock, pages->block->idstr, 256);
    }

    for (i = 0; i < pages->num; i++) {
        /* there are architectures where ram_addr_t is 32 bit */
        uint64_t temp = pages->offset[i];

        packet->offset[i] = cpu_to_be64(temp);
    }

    p->packets_sent++;
    p->total_normal_pages += pages->num;

    trace_multifd_send(p->id, packet_num, pages->num, p->flags,
                       p->next_packet_size);
}
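
/*
 * Validate the packet header that was just read from the channel and
 * convert it back to host byte order: check magic, version and size
 * limits, then resolve the RAMBlock and the per-page offsets.
 */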
static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
{
    MultiFDPacket_t *packet = p->packet;
    int i;

    packet->magic = be32_to_cpu(packet->magic);
    if (packet->magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet "
                   "magic %x and expected magic %x",
                   packet->magic, MULTIFD_MAGIC);
        return -1;
    }

    packet->version = be32_to_cpu(packet->version);
    if (packet->version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet "
                   "version %u and expected version %u",
                   packet->version, MULTIFD_VERSION);
        return -1;
    }

    p->flags = be32_to_cpu(packet->flags);

    packet->pages_alloc = be32_to_cpu(packet->pages_alloc);
    /*
     * If we received a packet that is 100 times bigger than expected
     * just stop migration.  It is a magic number.
     */
    if (packet->pages_alloc > p->page_count) {
        error_setg(errp, "multifd: received packet "
                   "with size %u and expected a size of %u",
                   packet->pages_alloc, p->page_count);
        return -1;
    }

    p->normal_num = be32_to_cpu(packet->normal_pages);
    if (p->normal_num > packet->pages_alloc) {
        error_setg(errp, "multifd: received packet "
                   "with %u pages and expected maximum pages are %u",
                   p->normal_num, packet->pages_alloc);
        return -1;
    }

    p->next_packet_size = be32_to_cpu(packet->next_packet_size);
    p->packet_num = be64_to_cpu(packet->packet_num);
    p->packets_recved++;
    p->total_normal_pages += p->normal_num;

    trace_multifd_recv(p->id, p->packet_num, p->normal_num, p->flags,
                       p->next_packet_size);

    if (p->normal_num == 0) {
        return 0;
    }

    /* make sure that ramblock is 0 terminated */
    packet->ramblock[255] = 0;
    p->block = qemu_ram_block_by_name(packet->ramblock);
    if (!p->block) {
        error_setg(errp, "multifd: unknown ram block %s",
                   packet->ramblock);
        return -1;
    }

    p->host = p->block->host;
    for (i = 0; i < p->normal_num; i++) {
        uint64_t offset = be64_to_cpu(packet->offset[i]);

        if (offset > (p->block->used_length - p->page_size)) {
            error_setg(errp, "multifd: offset too long %" PRIu64
                       " (max " RAM_ADDR_FMT ")",
                       offset, p->block->used_length);
            return -1;
        }
        p->normal[i] = offset;
    }

    return 0;
}
static bool multifd_send_should_exit(void)
{
    return qatomic_read(&multifd_send_state->exiting);
}
/*
 * The migration thread can wait on either of the two semaphores.  This
 * function can be used to kick the main thread out of waiting on either of
 * them.  Should mostly only be called when something wrong happened with
 * the current multifd send thread.
 */
static void multifd_send_kick_main(MultiFDSendParams *p)
{
    qemu_sem_post(&p->sem_sync);
    qemu_sem_post(&multifd_send_state->channels_ready);
}
/*
 * How do we use multifd_send_state->pages and channel->pages?
 *
 * We create a 'pages' struct for each channel, plus a main one.  Each time
 * we need to send a batch of pages we interchange the ones between
 * multifd_send_state and the channel that is sending it.  There are
 * two reasons for that:
 *    - to avoid so many mallocs during migration
 *    - to make it easier to know what to free at the end of migration
 *
 * This way we always know who is the owner of each "pages" struct,
 * and we don't need any locking.  It belongs either to the migration thread
 * or to the channel thread.  Switching is safe because the migration
 * thread is using the channel mutex when changing it, and the channel
 * thread has to have finished with its own, otherwise pending_job can't
 * be false.
 *
 * Returns true on success, false otherwise.
 */
static bool multifd_send_pages(void)
{
    int i;
    static int next_channel;
    MultiFDSendParams *p = NULL; /* make gcc happy */
    MultiFDPages_t *pages = multifd_send_state->pages;

    if (multifd_send_should_exit()) {
        return false;
    }

    /* We wait here, until at least one channel is ready */
    qemu_sem_wait(&multifd_send_state->channels_ready);

    /*
     * next_channel can remain from a previous migration that was
     * using more channels, so ensure it doesn't overflow if the
     * limit is lower now.
     */
    next_channel %= migrate_multifd_channels();
    for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
        if (multifd_send_should_exit()) {
            return false;
        }
        p = &multifd_send_state->params[i];
        /*
         * Lockless read to p->pending_job is safe, because only multifd
         * sender thread can clear it.
         */
        if (qatomic_read(&p->pending_job) == false) {
            next_channel = (i + 1) % migrate_multifd_channels();
            break;
        }
    }

    /*
     * Make sure we read p->pending_job before all the rest.  Pairs with
     * qatomic_store_release() in multifd_send_thread().
     */
    smp_mb_acquire();
    assert(!p->pages->num);
    multifd_send_state->pages = p->pages;
    p->pages = pages;
    /*
     * Making sure p->pages is setup before marking pending_job=true.  Pairs
     * with the qatomic_load_acquire() in multifd_send_thread().
     */
    qatomic_store_release(&p->pending_job, true);
    qemu_sem_post(&p->sem);

    return true;
}
static inline bool multifd_queue_empty(MultiFDPages_t *pages)
{
    return pages->num == 0;
}

static inline bool multifd_queue_full(MultiFDPages_t *pages)
{
    return pages->num == pages->allocated;
}

static inline void multifd_enqueue(MultiFDPages_t *pages, ram_addr_t offset)
{
    pages->offset[pages->num++] = offset;
}
/* Returns true if enqueue successful, false otherwise */
bool multifd_queue_page(RAMBlock *block, ram_addr_t offset)
{
    MultiFDPages_t *pages;

retry:
    pages = multifd_send_state->pages;

    /* If the queue is empty, we can already enqueue now */
    if (multifd_queue_empty(pages)) {
        pages->block = block;
        multifd_enqueue(pages, offset);
        return true;
    }

    /*
     * Not empty, and we need a flush.  That can be because of either:
     *
     * (1) The page is not on the same ramblock as the previous ones, or,
     * (2) The queue is full.
     *
     * After flush, always retry.
     */
    if (pages->block != block || multifd_queue_full(pages)) {
        if (!multifd_send_pages()) {
            return false;
        }
        goto retry;
    }

    /* Not empty, and we still have space, do it! */
    multifd_enqueue(pages, offset);
    return true;
}
/* Multifd send side hit an error; remember it and prepare to quit */
static void multifd_send_set_error(Error *err)
{
    /*
     * We don't want to exit the threads twice.  Depending on where
     * we get the error, or if there are two independent errors in two
     * threads at the same time, we can end up calling this function
     * twice.
     */
    if (qatomic_xchg(&multifd_send_state->exiting, 1)) {
        return;
    }

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
            s->state == MIGRATION_STATUS_DEVICE ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }
}
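
/*
 * Mark the send side as exiting and unblock every channel thread: post
 * each thread's semaphore and shut down its IO channel, then join the
 * TLS handshake and sender threads that were created.
 */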
static void multifd_send_terminate_threads(void)
{
    int i;

    trace_multifd_send_terminate_threads();

    /*
     * Tell everyone we're quitting.  No xchg() needed here; we simply
     * always set it.
     */
    qatomic_set(&multifd_send_state->exiting, 1);

    /*
     * Firstly, kick all threads out; no matter whether they are just idle,
     * or blocked in an IO system call.
     */
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_sem_post(&p->sem);
        if (p->c) {
            qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        }
    }

    /*
     * Finally recycle all the threads.
     */
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        if (p->tls_thread_created) {
            qemu_thread_join(&p->tls_thread);
        }

        if (p->thread_created) {
            qemu_thread_join(&p->thread);
        }
    }
}
static int multifd_send_channel_destroy(QIOChannel *send)
{
    return socket_send_channel_destroy(send);
}
static bool multifd_send_cleanup_channel(MultiFDSendParams *p, Error **errp)
{
    if (p->registered_yank) {
        migration_ioc_unregister_yank(p->c);
    }
    multifd_send_channel_destroy(p->c);
    p->c = NULL;
    qemu_sem_destroy(&p->sem);
    qemu_sem_destroy(&p->sem_sync);
    g_free(p->name);
    p->name = NULL;
    multifd_pages_clear(p->pages);
    p->pages = NULL;
    p->packet_len = 0;
    g_free(p->packet);
    p->packet = NULL;
    g_free(p->iov);
    p->iov = NULL;
    multifd_send_state->ops->send_cleanup(p, errp);

    return *errp == NULL;
}
static void multifd_send_cleanup_state(void)
{
    qemu_sem_destroy(&multifd_send_state->channels_created);
    qemu_sem_destroy(&multifd_send_state->channels_ready);
    g_free(multifd_send_state->params);
    multifd_send_state->params = NULL;
    multifd_pages_clear(multifd_send_state->pages);
    multifd_send_state->pages = NULL;
    g_free(multifd_send_state);
    multifd_send_state = NULL;
}
void multifd_send_shutdown(void)
{
    int i;

    if (!migrate_multifd()) {
        return;
    }

    multifd_send_terminate_threads();

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];
        Error *local_err = NULL;

        if (!multifd_send_cleanup_channel(p, &local_err)) {
            migrate_set_error(migrate_get_current(), local_err);
            error_free(local_err);
        }
    }

    multifd_send_cleanup_state();
}
static int multifd_zero_copy_flush(QIOChannel *c)
{
    int ret;
    Error *err = NULL;

    ret = qio_channel_flush(c, &err);
    if (ret < 0) {
        error_report_err(err);
        return -1;
    }
    if (ret == 1) {
        stat64_add(&mig_stats.dirty_sync_missed_zero_copy, 1);
    }

    return ret;
}
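
/*
 * Flush whatever is still queued on the send side and make every channel
 * emit a MULTIFD_FLAG_SYNC packet, waiting until each channel has done so.
 * With zero-copy enabled, also flush the outstanding zero-copy writes.
 */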
int multifd_send_sync_main(void)
{
    int i;
    bool flush_zero_copy;

    if (!migrate_multifd()) {
        return 0;
    }
    if (multifd_send_state->pages->num) {
        if (!multifd_send_pages()) {
            error_report("%s: multifd_send_pages fail", __func__);
            return -1;
        }
    }

    flush_zero_copy = migrate_zero_copy_send();

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        if (multifd_send_should_exit()) {
            return -1;
        }

        trace_multifd_send_sync_main_signal(p->id);

        /*
         * We should be the only user so far, so not possible to be set by
         * others concurrently.
         */
        assert(qatomic_read(&p->pending_sync) == false);
        qatomic_set(&p->pending_sync, true);
        qemu_sem_post(&p->sem);
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        if (multifd_send_should_exit()) {
            return -1;
        }

        qemu_sem_wait(&multifd_send_state->channels_ready);
        trace_multifd_send_sync_main_wait(p->id);
        qemu_sem_wait(&p->sem_sync);

        if (flush_zero_copy && p->c && (multifd_zero_copy_flush(p->c) < 0)) {
            return -1;
        }
    }
    trace_multifd_send_sync_main(multifd_send_state->packet_num);

    return 0;
}
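
/*
 * Per-channel sender thread.  It loops waiting on p->sem: for a pending
 * job it prepares and transmits the queued pages; for a pending sync
 * request it emits a standalone SYNC packet and wakes up
 * multifd_send_sync_main() via p->sem_sync.
 */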
static void *multifd_send_thread(void *opaque)
{
    MultiFDSendParams *p = opaque;
    MigrationThread *thread = NULL;
    Error *local_err = NULL;
    int ret = 0;

    thread = migration_threads_add(p->name, qemu_get_thread_id());

    trace_multifd_send_thread_start(p->id);
    rcu_register_thread();

    if (multifd_send_initial_packet(p, &local_err) < 0) {
        ret = -1;
        goto out;
    }

    while (true) {
        qemu_sem_post(&multifd_send_state->channels_ready);
        qemu_sem_wait(&p->sem);

        if (multifd_send_should_exit()) {
            break;
        }

        /*
         * Read pending_job flag before p->pages.  Pairs with the
         * qatomic_store_release() in multifd_send_pages().
         */
        if (qatomic_load_acquire(&p->pending_job)) {
            MultiFDPages_t *pages = p->pages;

            p->iovs_num = 0;
            assert(pages->num);

            ret = multifd_send_state->ops->send_prepare(p, &local_err);
            if (ret != 0) {
                break;
            }

            ret = qio_channel_writev_full_all(p->c, p->iov, p->iovs_num, NULL,
                                              0, p->write_flags, &local_err);
            if (ret != 0) {
                break;
            }

            stat64_add(&mig_stats.multifd_bytes,
                       p->next_packet_size + p->packet_len);

            multifd_pages_reset(p->pages);
            p->next_packet_size = 0;

            /*
             * Making sure p->pages is published before saying "we're
             * free".  Pairs with the smp_mb_acquire() in
             * multifd_send_pages().
             */
            qatomic_store_release(&p->pending_job, false);
        } else {
            /*
             * If not a normal job, must be a sync request.  Note that
             * pending_sync is a standalone flag (unlike pending_job), so
             * it doesn't require explicit memory barriers.
             */
            assert(qatomic_read(&p->pending_sync));
            p->flags = MULTIFD_FLAG_SYNC;
            multifd_send_fill_packet(p);
            ret = qio_channel_write_all(p->c, (void *)p->packet,
                                        p->packet_len, &local_err);
            if (ret != 0) {
                break;
            }

            /* p->next_packet_size will always be zero for a SYNC packet */
            stat64_add(&mig_stats.multifd_bytes, p->packet_len);

            qatomic_set(&p->pending_sync, false);
            qemu_sem_post(&p->sem_sync);
        }
    }

out:
    if (ret) {
        assert(local_err);
        trace_multifd_send_error(p->id);
        multifd_send_set_error(local_err);
        multifd_send_kick_main(p);
        error_free(local_err);
    }

    rcu_unregister_thread();
    migration_threads_remove(thread);
    trace_multifd_send_thread_end(p->id, p->packets_sent, p->total_normal_pages);

    return NULL;
}
static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque);
static void *multifd_tls_handshake_thread(void *opaque)
{
    MultiFDSendParams *p = opaque;
    QIOChannelTLS *tioc = QIO_CHANNEL_TLS(p->c);

    qio_channel_tls_handshake(tioc,
                              multifd_new_send_channel_async,
                              p,
                              NULL,
                              NULL);

    return NULL;
}
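
/*
 * Upgrade the socket channel to a TLS client channel and run the TLS
 * handshake in a dedicated thread; multifd_new_send_channel_async() is
 * invoked again once the handshake has completed.
 */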
static bool multifd_tls_channel_connect(MultiFDSendParams *p,
                                        QIOChannel *ioc,
                                        Error **errp)
{
    MigrationState *s = migrate_get_current();
    const char *hostname = s->hostname;
    QIOChannelTLS *tioc;

    tioc = migration_tls_client_create(ioc, hostname, errp);
    if (!tioc) {
        return false;
    }

    /*
     * Ownership of the socket channel now transfers to the newly
     * created TLS channel, which has already taken a reference.
     */
    object_unref(OBJECT(ioc));
    trace_multifd_tls_outgoing_handshake_start(ioc, tioc, hostname);
    qio_channel_set_name(QIO_CHANNEL(tioc), "multifd-tls-outgoing");
    p->c = QIO_CHANNEL(tioc);

    p->tls_thread_created = true;
    qemu_thread_create(&p->tls_thread, "multifd-tls-handshake-worker",
                       multifd_tls_handshake_thread, p,
                       QEMU_THREAD_JOINABLE);
    return true;
}
static bool multifd_channel_connect(MultiFDSendParams *p,
                                    QIOChannel *ioc,
                                    Error **errp)
{
    qio_channel_set_delay(ioc, false);

    migration_ioc_register_yank(ioc);
    p->registered_yank = true;
    p->c = ioc;

    p->thread_created = true;
    qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
                       QEMU_THREAD_JOINABLE);
    return true;
}
/*
 * When TLS is enabled this function is called once to establish the
 * TLS connection and a second time after the TLS handshake to create
 * the multifd channel.  Without TLS it goes straight into the channel
 * creation.
 */
static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
{
    MultiFDSendParams *p = opaque;
    QIOChannel *ioc = QIO_CHANNEL(qio_task_get_source(task));
    Error *local_err = NULL;
    bool ret;

    trace_multifd_new_send_channel_async(p->id);

    if (qio_task_propagate_error(task, &local_err)) {
        ret = false;
        goto out;
    }

    trace_multifd_set_outgoing_channel(ioc, object_get_typename(OBJECT(ioc)),
                                       migrate_get_current()->hostname);

    if (migrate_channel_requires_tls_upgrade(ioc)) {
        ret = multifd_tls_channel_connect(p, ioc, &local_err);
        if (ret) {
            return;
        }
    } else {
        ret = multifd_channel_connect(p, ioc, &local_err);
    }

out:
    /*
     * Here we're not interested whether creation succeeded, only that
     * it happened at all.
     */
    qemu_sem_post(&multifd_send_state->channels_created);

    if (ret) {
        return;
    }

    trace_multifd_new_send_channel_async_error(p->id, local_err);
    multifd_send_set_error(local_err);
    /*
     * If no channel has been created, drop the initial
     * reference.  Otherwise cleanup happens at
     * multifd_send_channel_destroy()
     */
    object_unref(OBJECT(ioc));
    error_free(local_err);
}
static void multifd_new_send_channel_create(gpointer opaque)
{
    socket_send_channel_create(multifd_new_send_channel_async, opaque);
}
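
/*
 * Allocate the global send state and the per-channel resources (packet
 * buffer, iovec array, semaphores), kick off asynchronous channel
 * creation, wait until every creation attempt has at least started, and
 * finally run the compression method's send_setup() on each channel.
 */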
bool multifd_send_setup(void)
{
    MigrationState *s = migrate_get_current();
    Error *local_err = NULL;
    int thread_count, ret = 0;
    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
    uint8_t i;

    if (!migrate_multifd()) {
        return true;
    }

    thread_count = migrate_multifd_channels();
    multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
    multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
    multifd_send_state->pages = multifd_pages_init(page_count);
    qemu_sem_init(&multifd_send_state->channels_created, 0);
    qemu_sem_init(&multifd_send_state->channels_ready, 0);
    qatomic_set(&multifd_send_state->exiting, 0);
    multifd_send_state->ops = multifd_ops[migrate_multifd_compression()];

    for (i = 0; i < thread_count; i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_sem_init(&p->sem, 0);
        qemu_sem_init(&p->sem_sync, 0);
        p->id = i;
        p->pages = multifd_pages_init(page_count);
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(uint64_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->packet->magic = cpu_to_be32(MULTIFD_MAGIC);
        p->packet->version = cpu_to_be32(MULTIFD_VERSION);
        p->name = g_strdup_printf("multifdsend_%d", i);
        /* We need one extra place for the packet header */
        p->iov = g_new0(struct iovec, page_count + 1);
        p->page_size = qemu_target_page_size();
        p->page_count = page_count;
        p->write_flags = 0;

        multifd_new_send_channel_create(p);
    }

    /*
     * Wait until channel creation has started for all channels.  The
     * creation can still fail, but no more channels will be created
     * past this point.
     */
    for (i = 0; i < thread_count; i++) {
        qemu_sem_wait(&multifd_send_state->channels_created);
    }

    for (i = 0; i < thread_count; i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        ret = multifd_send_state->ops->send_setup(p, &local_err);
        if (ret) {
            break;
        }
    }

    if (ret) {
        migrate_set_error(s, local_err);
        error_report_err(local_err);
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        return false;
    }

    return true;
}
struct {
    MultiFDRecvParams *params;
    /* number of created threads */
    int count;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* multifd ops */
    MultiFDMethods *ops;
} *multifd_recv_state;
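
/*
 * Propagate @err (if any) into the migration state and shut down every
 * receive channel so the channel threads drop out of their blocking
 * reads and can terminate.
 */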
static void multifd_recv_terminate_threads(Error *err)
{
    int i;

    trace_multifd_recv_terminate_threads(err != NULL);

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_lock(&p->mutex);
        /*
         * We could arrive here for two reasons:
         *  - normal quit, i.e. everything went fine, just finished
         *  - error quit: We close the channels so the channel threads
         *    finish the qio_channel_read_all_eof()
         */
        if (p->c) {
            qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        }
        qemu_mutex_unlock(&p->mutex);
    }
}
void multifd_recv_shutdown(void)
{
    if (migrate_multifd()) {
        multifd_recv_terminate_threads(NULL);
    }
}
static void multifd_recv_cleanup_channel(MultiFDRecvParams *p)
{
    migration_ioc_unregister_yank(p->c);
    object_unref(OBJECT(p->c));
    p->c = NULL;
    qemu_mutex_destroy(&p->mutex);
    qemu_sem_destroy(&p->sem_sync);
    g_free(p->name);
    p->name = NULL;
    p->packet_len = 0;
    g_free(p->packet);
    p->packet = NULL;
    g_free(p->iov);
    p->iov = NULL;
    g_free(p->normal);
    p->normal = NULL;
    multifd_recv_state->ops->recv_cleanup(p);
}
static void multifd_recv_cleanup_state(void)
{
    qemu_sem_destroy(&multifd_recv_state->sem_sync);
    g_free(multifd_recv_state->params);
    multifd_recv_state->params = NULL;
    g_free(multifd_recv_state);
    multifd_recv_state = NULL;
}
void multifd_recv_cleanup(void)
{
    int i;

    if (!migrate_multifd()) {
        return;
    }
    multifd_recv_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        /*
         * multifd_recv_thread may be blocked in the MULTIFD_FLAG_SYNC
         * handling code; waking it up here is harmless in the cleanup
         * phase.
         */
        qemu_sem_post(&p->sem_sync);

        if (p->thread_created) {
            qemu_thread_join(&p->thread);
        }
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        multifd_recv_cleanup_channel(&multifd_recv_state->params[i]);
    }
    multifd_recv_cleanup_state();
}
void multifd_recv_sync_main(void)
{
    int i;

    if (!migrate_multifd()) {
        return;
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        trace_multifd_recv_sync_main_wait(p->id);
        qemu_sem_wait(&multifd_recv_state->sem_sync);
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        WITH_QEMU_LOCK_GUARD(&p->mutex) {
            if (multifd_recv_state->packet_num < p->packet_num) {
                multifd_recv_state->packet_num = p->packet_num;
            }
        }
        trace_multifd_recv_sync_main_signal(p->id);
        qemu_sem_post(&p->sem_sync);
    }
    trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
}
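
/*
 * Per-channel receiver thread.  It reads a packet header, validates it
 * with multifd_recv_unfill_packet(), pulls the page data in through the
 * active recv_pages hook, and cooperates with multifd_recv_sync_main()
 * whenever a SYNC packet arrives.
 */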
static void *multifd_recv_thread(void *opaque)
{
    MultiFDRecvParams *p = opaque;
    Error *local_err = NULL;
    int ret;

    trace_multifd_recv_thread_start(p->id);
    rcu_register_thread();

    while (true) {
        uint32_t flags;

        ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
                                       p->packet_len, &local_err);
        if (ret == 0 || ret == -1) {   /* 0: EOF  -1: Error */
            break;
        }

        qemu_mutex_lock(&p->mutex);
        ret = multifd_recv_unfill_packet(p, &local_err);
        if (ret) {
            qemu_mutex_unlock(&p->mutex);
            break;
        }

        flags = p->flags;
        /* recv methods don't know how to handle the SYNC flag */
        p->flags &= ~MULTIFD_FLAG_SYNC;
        qemu_mutex_unlock(&p->mutex);

        if (p->normal_num) {
            ret = multifd_recv_state->ops->recv_pages(p, &local_err);
            if (ret != 0) {
                break;
            }
        }

        if (flags & MULTIFD_FLAG_SYNC) {
            qemu_sem_post(&multifd_recv_state->sem_sync);
            qemu_sem_wait(&p->sem_sync);
        }
    }

    if (local_err) {
        multifd_recv_terminate_threads(local_err);
        error_free(local_err);
    }

    rcu_unregister_thread();
    trace_multifd_recv_thread_end(p->id, p->packets_recved, p->total_normal_pages);

    return NULL;
}
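
/*
 * Allocate the global receive state and the per-channel buffers, then run
 * the compression method's recv_setup() on each channel.  Returns 0 if the
 * state already exists or multifd is disabled.
 */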
int multifd_recv_setup(Error **errp)
{
    int thread_count;
    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
    uint8_t i;

    /*
     * Return successfully if multiFD recv state is already initialised
     * or multiFD is not enabled.
     */
    if (multifd_recv_state || !migrate_multifd()) {
        return 0;
    }

    thread_count = migrate_multifd_channels();
    multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
    multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
    qatomic_set(&multifd_recv_state->count, 0);
    qemu_sem_init(&multifd_recv_state->sem_sync, 0);
    multifd_recv_state->ops = multifd_ops[migrate_multifd_compression()];

    for (i = 0; i < thread_count; i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem_sync, 0);
        p->id = i;
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(uint64_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->name = g_strdup_printf("multifdrecv_%d", i);
        p->iov = g_new0(struct iovec, page_count);
        p->normal = g_new0(ram_addr_t, page_count);
        p->page_count = page_count;
        p->page_size = qemu_target_page_size();
    }

    for (i = 0; i < thread_count; i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];
        int ret;

        ret = multifd_recv_state->ops->recv_setup(p, errp);
        if (ret) {
            return ret;
        }
    }
    return 0;
}
bool multifd_recv_all_channels_created(void)
{
    int thread_count = migrate_multifd_channels();

    if (!migrate_multifd()) {
        return true;
    }

    if (!multifd_recv_state) {
        /* Called before any connections created */
        return false;
    }

    return thread_count == qatomic_read(&multifd_recv_state->count);
}
/*
 * Try to receive all multifd channels to get ready for the migration.
 * Sets @errp when failing to receive the current channel.
 */
void multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
{
    MultiFDRecvParams *p;
    Error *local_err = NULL;
    int id;

    id = multifd_recv_initial_packet(ioc, &local_err);
    if (id < 0) {
        multifd_recv_terminate_threads(local_err);
        error_propagate_prepend(errp, local_err,
                                "failed to receive packet"
                                " via multifd channel %d: ",
                                qatomic_read(&multifd_recv_state->count));
        return;
    }
    trace_multifd_recv_new_channel(id);

    p = &multifd_recv_state->params[id];
    if (p->c != NULL) {
        error_setg(&local_err, "multifd: received id '%d' already setup'",
                   id);
        multifd_recv_terminate_threads(local_err);
        error_propagate(errp, local_err);
        return;
    }
    p->c = ioc;
    object_ref(OBJECT(ioc));

    p->thread_created = true;
    qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
                       QEMU_THREAD_JOINABLE);
    qatomic_inc(&multifd_recv_state->count);
}