/*
 * RDMA protocol and interfaces
 *
 * Copyright IBM, Corp. 2010-2013
 * Copyright Red Hat, Inc. 2015-2016
 *
 * Authors:
 *  Michael R. Hines <mrhines@us.ibm.com>
 *  Jiuxing Liu <jl@us.ibm.com>
 *  Daniel P. Berrange <berrange@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "rdma.h"
#include "migration.h"
#include "qemu-file.h"
#include "ram.h"
#include "qemu-file-channel.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/rcu.h"
#include "qemu/sockets.h"
#include "qemu/bitmap.h"
#include "qemu/coroutine.h"
#include "exec/memory.h"
#include <sys/socket.h>
#include <netdb.h>
#include <arpa/inet.h>
#include <rdma/rdma_cma.h>
#include "trace.h"
#include "qom/object.h"
/*
 * Print an error on both the Monitor and the Log file.
 */
#define ERROR(errp, fmt, ...) \
    do { \
        fprintf(stderr, "RDMA ERROR: " fmt "\n", ## __VA_ARGS__); \
        if (errp && (*(errp) == NULL)) { \
            error_setg(errp, "RDMA ERROR: " fmt, ## __VA_ARGS__); \
        } \
    } while (0)
#define RDMA_RESOLVE_TIMEOUT_MS 10000

/* Do not merge data if larger than this. */
#define RDMA_MERGE_MAX (2 * 1024 * 1024)
#define RDMA_SIGNALED_SEND_MAX (RDMA_MERGE_MAX / 4096)

#define RDMA_REG_CHUNK_SHIFT 20 /* 1 MB */
/*
 * This is only for non-live state being migrated.
 * Instead of RDMA_WRITE messages, we use RDMA_SEND
 * messages for that state, which requires a different
 * delivery design than main memory.
 */
#define RDMA_SEND_INCREMENT 32768

/*
 * Maximum size of an InfiniBand SEND message
 */
#define RDMA_CONTROL_MAX_BUFFER (512 * 1024)
#define RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE 4096

#define RDMA_CONTROL_VERSION_CURRENT 1
/*
 * Capabilities for negotiation.
 */
#define RDMA_CAPABILITY_PIN_ALL 0x01

/*
 * Add the other flags above to this list of known capabilities
 * as they are introduced.
 */
static uint32_t known_capabilities = RDMA_CAPABILITY_PIN_ALL;
#define CHECK_ERROR_STATE() \
    do { \
        if (rdma->error_state) { \
            if (!rdma->error_reported) { \
                error_report("RDMA is in an error state waiting for " \
                             "migration to abort!"); \
                rdma->error_reported = 1; \
            } \
            return rdma->error_state; \
        } \
    } while (0)
/*
 * A work request ID is 64-bits and we split up these bits
 * into 3 parts:
 *
 * bits 0-15 : type of control message, 2^16
 * bits 16-29: ram block index, 2^14
 * bits 30-63: ram block chunk number, 2^34
 *
 * The last two bit ranges are only used for RDMA writes,
 * in order to track their completion and potentially
 * also track unregistration status of the message.
 */
#define RDMA_WRID_TYPE_SHIFT  0UL
#define RDMA_WRID_BLOCK_SHIFT 16UL
#define RDMA_WRID_CHUNK_SHIFT 30UL

#define RDMA_WRID_TYPE_MASK \
    ((1UL << RDMA_WRID_BLOCK_SHIFT) - 1UL)

#define RDMA_WRID_BLOCK_MASK \
    (~RDMA_WRID_TYPE_MASK & ((1UL << RDMA_WRID_CHUNK_SHIFT) - 1UL))

#define RDMA_WRID_CHUNK_MASK (~RDMA_WRID_BLOCK_MASK & ~RDMA_WRID_TYPE_MASK)
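/*
 * Worked example of the encoding above (illustrative only): an RDMA write
 * work request for ram block index 5, chunk 7 is encoded as
 *
 *   wr_id = RDMA_WRID_RDMA_WRITE | (5UL << 16) | (7UL << 30)
 *         = 0x1C0050001
 *
 * and the masks recover each field independently:
 *
 *   wr_id & RDMA_WRID_TYPE_MASK                             -> 1 (write)
 *   (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT -> 5
 *   (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT -> 7
 */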
/*
 * RDMA migration protocol:
 * 1. RDMA Writes (data messages, i.e. RAM)
 * 2. IB Send/Recv (control channel messages)
 */
enum {
    RDMA_WRID_NONE = 0,
    RDMA_WRID_RDMA_WRITE = 1,
    RDMA_WRID_SEND_CONTROL = 2000,
    RDMA_WRID_RECV_CONTROL = 4000,
};

static const char *wrid_desc[] = {
    [RDMA_WRID_NONE] = "NONE",
    [RDMA_WRID_RDMA_WRITE] = "WRITE RDMA",
    [RDMA_WRID_SEND_CONTROL] = "CONTROL SEND",
    [RDMA_WRID_RECV_CONTROL] = "CONTROL RECV",
};

/*
 * Work request IDs for IB SEND messages only (not RDMA writes).
 * This is used by the migration protocol to transmit
 * control messages (such as device state and registration commands)
 *
 * We could use more WRs, but we have enough for now.
 */
enum {
    RDMA_WRID_READY = 0,
    RDMA_WRID_DATA,
    RDMA_WRID_CONTROL,
    RDMA_WRID_MAX,
};
/*
 * SEND/RECV IB Control Messages.
 */
enum {
    RDMA_CONTROL_NONE = 0,
    RDMA_CONTROL_ERROR,
    RDMA_CONTROL_READY,               /* ready to receive */
    RDMA_CONTROL_QEMU_FILE,           /* QEMUFile-transmitted bytes */
    RDMA_CONTROL_RAM_BLOCKS_REQUEST,  /* RAMBlock synchronization */
    RDMA_CONTROL_RAM_BLOCKS_RESULT,   /* RAMBlock synchronization */
    RDMA_CONTROL_COMPRESS,            /* page contains repeat values */
    RDMA_CONTROL_REGISTER_REQUEST,    /* dynamic page registration */
    RDMA_CONTROL_REGISTER_RESULT,     /* key to use after registration */
    RDMA_CONTROL_REGISTER_FINISHED,   /* current iteration finished */
    RDMA_CONTROL_UNREGISTER_REQUEST,  /* dynamic UN-registration */
    RDMA_CONTROL_UNREGISTER_FINISHED, /* unpinning finished */
};
/*
 * Memory and MR structures used to represent an IB Send/Recv work request.
 * This is *not* used for RDMA writes, only IB Send/Recv.
 */
typedef struct {
    uint8_t  control[RDMA_CONTROL_MAX_BUFFER]; /* actual buffer to register */
    struct   ibv_mr *control_mr;               /* registration metadata */
    size_t   control_len;                      /* length of the message */
    uint8_t *control_curr;                     /* start of unconsumed bytes */
} RDMAWorkRequestData;
/*
 * Negotiate RDMA capabilities during connection-setup time.
 */
typedef struct {
    uint32_t version;
    uint32_t flags;
} RDMACapabilities;

static void caps_to_network(RDMACapabilities *cap)
{
    cap->version = htonl(cap->version);
    cap->flags = htonl(cap->flags);
}

static void network_to_caps(RDMACapabilities *cap)
{
    cap->version = ntohl(cap->version);
    cap->flags = ntohl(cap->flags);
}
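/*
 * Illustrative only (not an additional protocol rule): a source that wants
 * chunk-by-chunk registration rather than pinning all of RAM announces
 *
 *   RDMACapabilities cap = { .version = RDMA_CONTROL_VERSION_CURRENT,
 *                            .flags = 0 };
 *   caps_to_network(&cap);
 *
 * in the connection-setup private data; the receiving side applies
 * network_to_caps() and rejects any flags outside known_capabilities.
 */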
/*
 * Representation of a RAMBlock from an RDMA perspective.
 * This is not transmitted, only local.
 * This and subsequent structures cannot be linked lists
 * because we're using a single IB message to transmit
 * the information. It's small anyway, so a list is overkill.
 */
typedef struct RDMALocalBlock {
    char          *block_name;
    uint8_t       *local_host_addr;  /* local virtual address */
    uint64_t       remote_host_addr; /* remote virtual address */
    uint64_t       offset;
    uint64_t       length;
    struct         ibv_mr **pmr;     /* MRs for chunk-level registration */
    struct         ibv_mr *mr;       /* MR for non-chunk-level registration */
    uint32_t      *remote_keys;      /* rkeys for chunk-level registration */
    uint32_t       remote_rkey;      /* rkeys for non-chunk-level registration */
    int            index;            /* which block are we */
    unsigned int   src_index;        /* (Only used on dest) */
    bool           is_ram_block;
    int            nb_chunks;
    unsigned long *transit_bitmap;
    unsigned long *unregister_bitmap;
} RDMALocalBlock;
/*
 * Also represents a RAMblock, but only on the dest.
 * This gets transmitted by the dest during connection-time
 * to the source VM and then is used to populate the
 * corresponding RDMALocalBlock with
 * the information needed to perform the actual RDMA.
 */
typedef struct QEMU_PACKED RDMADestBlock {
    uint64_t remote_host_addr;
    uint64_t offset;
    uint64_t length;
    uint32_t remote_rkey;
    uint32_t padding;
} RDMADestBlock;
static const char *control_desc(unsigned int rdma_control)
{
    static const char *strs[] = {
        [RDMA_CONTROL_NONE] = "NONE",
        [RDMA_CONTROL_ERROR] = "ERROR",
        [RDMA_CONTROL_READY] = "READY",
        [RDMA_CONTROL_QEMU_FILE] = "QEMU FILE",
        [RDMA_CONTROL_RAM_BLOCKS_REQUEST] = "RAM BLOCKS REQUEST",
        [RDMA_CONTROL_RAM_BLOCKS_RESULT] = "RAM BLOCKS RESULT",
        [RDMA_CONTROL_COMPRESS] = "COMPRESS",
        [RDMA_CONTROL_REGISTER_REQUEST] = "REGISTER REQUEST",
        [RDMA_CONTROL_REGISTER_RESULT] = "REGISTER RESULT",
        [RDMA_CONTROL_REGISTER_FINISHED] = "REGISTER FINISHED",
        [RDMA_CONTROL_UNREGISTER_REQUEST] = "UNREGISTER REQUEST",
        [RDMA_CONTROL_UNREGISTER_FINISHED] = "UNREGISTER FINISHED",
    };

    if (rdma_control > RDMA_CONTROL_UNREGISTER_FINISHED) {
        return "??BAD CONTROL VALUE??";
    }

    return strs[rdma_control];
}
static uint64_t htonll(uint64_t v)
{
    union { uint32_t lv[2]; uint64_t llv; } u;
    u.lv[0] = htonl(v >> 32);
    u.lv[1] = htonl(v & 0xFFFFFFFFULL);
    return u.llv;
}

static uint64_t ntohll(uint64_t v)
{
    union { uint32_t lv[2]; uint64_t llv; } u;
    u.llv = v;
    return ((uint64_t)ntohl(u.lv[0]) << 32) | (uint64_t) ntohl(u.lv[1]);
}
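/*
 * Worked example (illustrative only): on a little-endian host,
 * htonll(0x0102030405060708ULL) stores the bytes 0x01..0x08 in ascending
 * memory order, i.e. big-endian on the wire, and ntohll() of those same
 * bytes yields 0x0102030405060708ULL again, so the pair round-trips
 * regardless of host endianness.
 */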
static void dest_block_to_network(RDMADestBlock *db)
{
    db->remote_host_addr = htonll(db->remote_host_addr);
    db->offset = htonll(db->offset);
    db->length = htonll(db->length);
    db->remote_rkey = htonl(db->remote_rkey);
}

static void network_to_dest_block(RDMADestBlock *db)
{
    db->remote_host_addr = ntohll(db->remote_host_addr);
    db->offset = ntohll(db->offset);
    db->length = ntohll(db->length);
    db->remote_rkey = ntohl(db->remote_rkey);
}
/*
 * Virtual address of the above structures used for transmitting
 * the RAMBlock descriptions at connection-time.
 * This structure is *not* transmitted.
 */
typedef struct RDMALocalBlocks {
    int nb_blocks;
    bool init;             /* main memory init complete */
    RDMALocalBlock *block;
} RDMALocalBlocks;
/*
 * Main data structure for RDMA state.
 * While there is only one copy of this structure being allocated right now,
 * this is the place where one would start if you wanted to consider
 * having more than one RDMA connection open at the same time.
 */
typedef struct RDMAContext {
    char *host;
    int port;

    RDMAWorkRequestData wr_data[RDMA_WRID_MAX];

    /*
     * This is used by *_exchange_send() to figure out whether or not
     * the initial "READY" message has already been received or not.
     * This is because other functions may potentially poll() and detect
     * the READY message before send() does, in which case we need to
     * know if it completed.
     */
    int control_ready_expected;

    /* number of outstanding writes */
    int nb_sent;

    /* store info about current buffer so that we can
       merge it with future sends */
    uint64_t current_addr;
    uint64_t current_length;
    /* index of ram block the current buffer belongs to */
    int current_index;
    /* index of the chunk in the current ram block */
    int current_chunk;

    bool pin_all;

    /*
     * infiniband-specific variables for opening the device
     * and maintaining connection state and so forth.
     *
     * cm_id also has ibv_context, rdma_event_channel, and ibv_qp in
     * cm_id->verbs, cm_id->channel, and cm_id->qp.
     */
    struct rdma_cm_id *cm_id;               /* connection manager ID */
    struct rdma_cm_id *listen_id;
    bool connected;

    struct ibv_context          *verbs;
    struct rdma_event_channel   *channel;
    struct ibv_qp *qp;                      /* queue pair */
    struct ibv_comp_channel *comp_channel;  /* completion channel */
    struct ibv_pd *pd;                      /* protection domain */
    struct ibv_cq *cq;                      /* completion queue */

    /*
     * If a previous write failed (perhaps because of a failed
     * memory registration), then do not attempt any future work
     * and remember the error state.
     */
    int error_state;
    int error_reported;
    int received_error;

    /*
     * Description of ram blocks used throughout the code.
     */
    RDMALocalBlocks local_ram_blocks;
    RDMADestBlock  *dest_blocks;

    /* Index of the next RAMBlock received during block registration */
    unsigned int    next_src_index;

    /*
     * Migration on *destination* started.
     * Then use coroutine yield function.
     * Source runs in a thread, so we don't care.
     */
    int migration_started_on_destination;

    int total_registrations;
    int total_writes;

    int unregister_current, unregister_next;
    uint64_t unregistrations[RDMA_SIGNALED_SEND_MAX];

    GHashTable *blockmap;

    /* the RDMAContext for return path */
    struct RDMAContext *return_path;
    bool is_return_path;
} RDMAContext;
#define TYPE_QIO_CHANNEL_RDMA "qio-channel-rdma"
OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelRDMA, QIO_CHANNEL_RDMA)

struct QIOChannelRDMA {
    QIOChannel parent;
    RDMAContext *rdmain;
    RDMAContext *rdmaout;
    QEMUFile *file;
    bool blocking; /* XXX we don't actually honour this yet */
};
/*
 * Main structure for IB Send/Recv control messages.
 * This gets prepended at the beginning of every Send/Recv.
 */
typedef struct QEMU_PACKED {
    uint32_t len;     /* Total length of data portion */
    uint32_t type;    /* which control command to perform */
    uint32_t repeat;  /* number of commands in data portion of same type */
    uint32_t padding;
} RDMAControlHeader;
static void control_to_network(RDMAControlHeader *control)
{
    control->type = htonl(control->type);
    control->len = htonl(control->len);
    control->repeat = htonl(control->repeat);
}

static void network_to_control(RDMAControlHeader *control)
{
    control->type = ntohl(control->type);
    control->len = ntohl(control->len);
    control->repeat = ntohl(control->repeat);
}
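/*
 * Wire layout example (illustrative only): a READY message with no data
 * occupies exactly sizeof(RDMAControlHeader) == 16 bytes on the wire, all
 * fields big-endian:
 *
 *   len = 0, type = htonl(RDMA_CONTROL_READY), repeat = htonl(1), padding
 *
 * A message that carries a payload, e.g. a register request, instead sets
 * len to the payload size and places the payload immediately after the
 * header in the control buffer.
 */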
/*
 * Register a single Chunk.
 * Information sent by the source VM to inform the dest
 * to register a single chunk of memory before we can perform
 * the actual RDMA operation.
 */
typedef struct QEMU_PACKED {
    union QEMU_PACKED {
        uint64_t current_addr;  /* offset into the ram_addr_t space */
        uint64_t chunk;         /* chunk to lookup if unregistering */
    } key;
    uint32_t current_index;     /* which ramblock the chunk belongs to */
    uint32_t padding;
    uint64_t chunks;            /* how many sequential chunks to register */
} RDMARegister;
static void register_to_network(RDMAContext *rdma, RDMARegister *reg)
{
    RDMALocalBlock *local_block;
    local_block  = &rdma->local_ram_blocks.block[reg->current_index];

    if (local_block->is_ram_block) {
        /*
         * current_addr as passed in is an address in the local ram_addr_t
         * space, we need to translate this for the destination
         */
        reg->key.current_addr -= local_block->offset;
        reg->key.current_addr += rdma->dest_blocks[reg->current_index].offset;
    }
    reg->key.current_addr = htonll(reg->key.current_addr);
    reg->current_index = htonl(reg->current_index);
    reg->chunks = htonll(reg->chunks);
}
static void network_to_register(RDMARegister *reg)
{
    reg->key.current_addr = ntohll(reg->key.current_addr);
    reg->current_index = ntohl(reg->current_index);
    reg->chunks = ntohll(reg->chunks);
}
typedef struct QEMU_PACKED {
    uint32_t value;     /* if zero, we will madvise() */
    uint32_t block_idx; /* which ram block index */
    uint64_t offset;    /* Address in remote ram_addr_t space */
    uint64_t length;    /* length of the chunk */
} RDMACompress;
static void compress_to_network(RDMAContext *rdma, RDMACompress *comp)
{
    comp->value = htonl(comp->value);
    /*
     * comp->offset as passed in is an address in the local ram_addr_t
     * space, we need to translate this for the destination
     */
    comp->offset -= rdma->local_ram_blocks.block[comp->block_idx].offset;
    comp->offset += rdma->dest_blocks[comp->block_idx].offset;
    comp->block_idx = htonl(comp->block_idx);
    comp->offset = htonll(comp->offset);
    comp->length = htonll(comp->length);
}
static void network_to_compress(RDMACompress *comp)
{
    comp->value = ntohl(comp->value);
    comp->block_idx = ntohl(comp->block_idx);
    comp->offset = ntohll(comp->offset);
    comp->length = ntohll(comp->length);
}
/*
 * The result of the dest's memory registration produces an "rkey"
 * which the source VM must reference in order to perform
 * the RDMA operation.
 */
typedef struct QEMU_PACKED {
    uint32_t rkey;
    uint32_t padding;
    uint64_t host_addr;
} RDMARegisterResult;

static void result_to_network(RDMARegisterResult *result)
{
    result->rkey = htonl(result->rkey);
    result->host_addr = htonll(result->host_addr);
}

static void network_to_result(RDMARegisterResult *result)
{
    result->rkey = ntohl(result->rkey);
    result->host_addr = ntohll(result->host_addr);
}
const char *print_wrid(int wrid);
static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
                                   uint8_t *data, RDMAControlHeader *resp,
                                   int *resp_idx,
                                   int (*callback)(RDMAContext *rdma));
static inline uint64_t ram_chunk_index(const uint8_t *start,
                                       const uint8_t *host)
{
    return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT;
}

static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block,
                                       uint64_t i)
{
    return (uint8_t *)(uintptr_t)(rdma_ram_block->local_host_addr +
                                  (i << RDMA_REG_CHUNK_SHIFT));
}

static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block,
                                     uint64_t i)
{
    uint8_t *result = ram_chunk_start(rdma_ram_block, i) +
                      (1UL << RDMA_REG_CHUNK_SHIFT);

    if (result > (rdma_ram_block->local_host_addr + rdma_ram_block->length)) {
        result = rdma_ram_block->local_host_addr + rdma_ram_block->length;
    }

    return result;
}
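/*
 * Worked example (illustrative only): with RDMA_REG_CHUNK_SHIFT of 20 a
 * chunk is 1 MB, so for a host address 5 MB + 4 KB into a block,
 * ram_chunk_index() returns (5 * 1024 * 1024 + 4096) >> 20 == 5. For a
 * block of length 512 MB + 1 byte, rdma_add_block() below computes
 * nb_chunks = 512 + 1, and ram_chunk_end() clamps the final, partial
 * chunk at the end of the block.
 */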
static int rdma_add_block(RDMAContext *rdma, const char *block_name,
                          void *host_addr,
                          ram_addr_t block_offset, uint64_t length)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;
    RDMALocalBlock *block;
    RDMALocalBlock *old = local->block;

    local->block = g_new0(RDMALocalBlock, local->nb_blocks + 1);

    if (local->nb_blocks) {
        int x;

        if (rdma->blockmap) {
            for (x = 0; x < local->nb_blocks; x++) {
                g_hash_table_remove(rdma->blockmap,
                                    (void *)(uintptr_t)old[x].offset);
                g_hash_table_insert(rdma->blockmap,
                                    (void *)(uintptr_t)old[x].offset,
                                    &local->block[x]);
            }
        }
        memcpy(local->block, old, sizeof(RDMALocalBlock) * local->nb_blocks);
        g_free(old);
    }

    block = &local->block[local->nb_blocks];

    block->block_name = g_strdup(block_name);
    block->local_host_addr = host_addr;
    block->offset = block_offset;
    block->length = length;
    block->index = local->nb_blocks;
    block->src_index = ~0U; /* Filled in by the receipt of the block list */
    block->nb_chunks = ram_chunk_index(host_addr, host_addr + length) + 1UL;
    block->transit_bitmap = bitmap_new(block->nb_chunks);
    bitmap_clear(block->transit_bitmap, 0, block->nb_chunks);
    block->unregister_bitmap = bitmap_new(block->nb_chunks);
    bitmap_clear(block->unregister_bitmap, 0, block->nb_chunks);
    block->remote_keys = g_new0(uint32_t, block->nb_chunks);

    block->is_ram_block = local->init ? false : true;

    if (rdma->blockmap) {
        g_hash_table_insert(rdma->blockmap,
                            (void *)(uintptr_t)block_offset, block);
    }

    trace_rdma_add_block(block_name, local->nb_blocks,
                         (uintptr_t) block->local_host_addr,
                         block->offset, block->length,
                         (uintptr_t) (block->local_host_addr + block->length),
                         BITS_TO_LONGS(block->nb_chunks) *
                             sizeof(unsigned long) * 8,
                         block->nb_chunks);

    local->nb_blocks++;

    return 0;
}
/*
 * Memory regions need to be registered with the device and queue pairs setup
 * in advance before the migration starts. This tells us where the RAM blocks
 * are so that we can register them individually.
 */
static int qemu_rdma_init_one_block(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t block_offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    return rdma_add_block(opaque, block_name, host_addr, block_offset, length);
}
/*
 * Identify the RAMBlocks and their quantity. They will be referenced to
 * identify chunk boundaries inside each RAMBlock and also be referenced
 * during dynamic page registration.
 */
static int qemu_rdma_init_ram_blocks(RDMAContext *rdma)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;
    int ret;

    assert(rdma->blockmap == NULL);
    memset(local, 0, sizeof *local);
    ret = foreach_not_ignored_block(qemu_rdma_init_one_block, rdma);
    if (ret) {
        return ret;
    }
    trace_qemu_rdma_init_ram_blocks(local->nb_blocks);
    rdma->dest_blocks = g_new0(RDMADestBlock,
                               rdma->local_ram_blocks.nb_blocks);
    local->init = true;
    return 0;
}
/*
 * Note: If used outside of cleanup, the caller must ensure that the
 * destination block structures are also updated
 */
static int rdma_delete_block(RDMAContext *rdma, RDMALocalBlock *block)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;
    RDMALocalBlock *old = local->block;
    int x;

    if (rdma->blockmap) {
        g_hash_table_remove(rdma->blockmap, (void *)(uintptr_t)block->offset);
    }
    if (block->pmr) {
        int j;

        for (j = 0; j < block->nb_chunks; j++) {
            if (!block->pmr[j]) {
                continue;
            }
            ibv_dereg_mr(block->pmr[j]);
            rdma->total_registrations--;
        }
        g_free(block->pmr);
        block->pmr = NULL;
    }

    if (block->mr) {
        ibv_dereg_mr(block->mr);
        rdma->total_registrations--;
        block->mr = NULL;
    }

    g_free(block->transit_bitmap);
    block->transit_bitmap = NULL;

    g_free(block->unregister_bitmap);
    block->unregister_bitmap = NULL;

    g_free(block->remote_keys);
    block->remote_keys = NULL;

    g_free(block->block_name);
    block->block_name = NULL;

    if (rdma->blockmap) {
        for (x = 0; x < local->nb_blocks; x++) {
            g_hash_table_remove(rdma->blockmap,
                                (void *)(uintptr_t)old[x].offset);
        }
    }

    if (local->nb_blocks > 1) {

        local->block = g_new0(RDMALocalBlock, local->nb_blocks - 1);

        if (block->index) {
            memcpy(local->block, old, sizeof(RDMALocalBlock) * block->index);
        }

        if (block->index < (local->nb_blocks - 1)) {
            memcpy(local->block + block->index, old + (block->index + 1),
                   sizeof(RDMALocalBlock) *
                       (local->nb_blocks - (block->index + 1)));
            for (x = block->index; x < local->nb_blocks - 1; x++) {
                local->block[x].index--;
            }
        }
    } else {
        assert(block == local->block);
        local->block = NULL;
    }

    trace_rdma_delete_block(block, (uintptr_t)block->local_host_addr,
                            block->offset, block->length,
                            (uintptr_t)(block->local_host_addr + block->length),
                            BITS_TO_LONGS(block->nb_chunks) *
                                sizeof(unsigned long) * 8, block->nb_chunks);

    g_free(old);

    local->nb_blocks--;

    if (local->nb_blocks && rdma->blockmap) {
        for (x = 0; x < local->nb_blocks; x++) {
            g_hash_table_insert(rdma->blockmap,
                                (void *)(uintptr_t)local->block[x].offset,
                                &local->block[x]);
        }
    }

    return 0;
}
/*
 * Put in the log file which RDMA device was opened and the details
 * associated with that device.
 */
static void qemu_rdma_dump_id(const char *who, struct ibv_context *verbs)
{
    struct ibv_port_attr port;

    if (ibv_query_port(verbs, 1, &port)) {
        error_report("Failed to query port information");
        return;
    }

    printf("%s RDMA Device opened: kernel name %s "
           "uverbs device name %s, "
           "infiniband_verbs class device path %s, "
           "infiniband class device path %s, "
           "transport: (%d) %s\n",
                who,
                verbs->device->name,
                verbs->device->dev_name,
                verbs->device->dev_path,
                verbs->device->ibdev_path,
                port.link_layer,
                (port.link_layer == IBV_LINK_LAYER_INFINIBAND) ? "Infiniband" :
                 ((port.link_layer == IBV_LINK_LAYER_ETHERNET)
                    ? "Ethernet" : "Unknown"));
}
/*
 * Put in the log file the RDMA gid addressing information,
 * useful for folks who have trouble understanding the
 * RDMA device hierarchy in the kernel.
 */
static void qemu_rdma_dump_gid(const char *who, struct rdma_cm_id *id)
{
    char sgid[33];
    char dgid[33];
    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.sgid, sgid, sizeof sgid);
    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.dgid, dgid, sizeof dgid);
    trace_qemu_rdma_dump_gid(who, sgid, dgid);
}
/*
 * As of now, IPv6 over RoCE / iWARP is not supported by linux.
 * We will try the next addrinfo struct, and fail if there are
 * no other valid addresses to bind against.
 *
 * If user is listening on '[::]', then we will not have opened a device
 * yet and have no way of verifying if the device is RoCE or not.
 *
 * In this case, the source VM will throw an error for ALL types of
 * connections (both IPv4 and IPv6) if the destination machine does not have
 * a regular infiniband network available for use.
 *
 * The only way to guarantee that an error is thrown for broken kernels is
 * for the management software to choose a *specific* interface at bind time
 * and validate what type of hardware it is.
 *
 * Unfortunately, this puts the user in a fix:
 *
 *  If the source VM connects with an IPv4 address without knowing that the
 *  destination has bound to '[::]' the migration will unconditionally fail
 *  unless the management software is explicitly listening on the IPv4
 *  address while using a RoCE-based device.
 *
 *  If the source VM connects with an IPv6 address, then we're OK because we
 *  can throw an error on the source (and similarly on the destination).
 *
 *  But in mixed environments, this will be broken for a while until it is
 *  fixed inside linux.
 *
 * We do provide a *tiny* bit of help in this function: We can list all of the
 * devices in the system and check to see if all the devices are RoCE or
 * Infiniband.
 *
 * If we detect that we have a *pure* RoCE environment, then we can safely
 * throw an error even if the management software has specified '[::]' as the
 * bind address.
 *
 * However, if there are multiple heterogeneous devices, then we cannot make
 * this assumption and the user just has to be sure they know what they are
 * doing.
 *
 * Patches are being reviewed on linux-rdma.
 */
static int qemu_rdma_broken_ipv6_kernel(struct ibv_context *verbs, Error **errp)
{
    /* This bug only exists in linux, to our knowledge. */
#ifdef CONFIG_LINUX
    struct ibv_port_attr port_attr;

    /*
     * Verbs are only NULL if management has bound to '[::]'.
     *
     * Let's iterate through all the devices and see if there any pure IB
     * devices (non-ethernet).
     *
     * If not, then we can safely proceed with the migration.
     * Otherwise, there are no guarantees until the bug is fixed in linux.
     */
    if (!verbs) {
        int num_devices, x;
        struct ibv_device **dev_list = ibv_get_device_list(&num_devices);
        bool roce_found = false;
        bool ib_found = false;

        for (x = 0; x < num_devices; x++) {
            verbs = ibv_open_device(dev_list[x]);
            if (!verbs) {
                if (errno == EPERM) {
                    continue;
                } else {
                    return -EINVAL;
                }
            }

            if (ibv_query_port(verbs, 1, &port_attr)) {
                ibv_close_device(verbs);
                ERROR(errp, "Could not query initial IB port");
                return -EINVAL;
            }

            if (port_attr.link_layer == IBV_LINK_LAYER_INFINIBAND) {
                ib_found = true;
            } else if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
                roce_found = true;
            }

            ibv_close_device(verbs);
        }

        if (roce_found) {
            if (ib_found) {
                fprintf(stderr, "WARN: migrations may fail:"
                                " IPv6 over RoCE / iWARP in linux"
                                " is broken. But since you appear to have a"
                                " mixed RoCE / IB environment, be sure to only"
                                " migrate over the IB fabric until the kernel "
                                " fixes the bug.\n");
            } else {
                ERROR(errp, "You only have RoCE / iWARP devices in your systems"
                            " and your management software has specified '[::]'"
                            ", but IPv6 over RoCE / iWARP is not supported in Linux.");
                return -ENONET;
            }
        }

        return 0;
    }

    /*
     * If we have a verbs context, that means that something other than
     * '[::]' was used by the management software for binding. In which case
     * we can actually warn the user about a potentially broken kernel.
     */

    /* IB ports start with 1, not 0 */
    if (ibv_query_port(verbs, 1, &port_attr)) {
        ERROR(errp, "Could not query initial IB port");
        return -EINVAL;
    }

    if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
        ERROR(errp, "Linux kernel's RoCE / iWARP does not support IPv6 "
                    "(but patches on linux-rdma in progress)");
        return -ENONET;
    }

#endif

    return 0;
}
/*
 * Figure out which RDMA device corresponds to the requested IP hostname
 * Also create the initial connection manager identifiers for opening
 * the connection.
 */
static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
{
    int ret;
    struct rdma_addrinfo *res;
    char port_str[16];
    struct rdma_cm_event *cm_event;
    char ip[40] = "unknown";
    struct rdma_addrinfo *e;

    if (rdma->host == NULL || !strcmp(rdma->host, "")) {
        ERROR(errp, "RDMA hostname has not been set");
        return -EINVAL;
    }

    /* create CM channel */
    rdma->channel = rdma_create_event_channel();
    if (!rdma->channel) {
        ERROR(errp, "could not create CM channel");
        return -EINVAL;
    }

    /* create CM id */
    ret = rdma_create_id(rdma->channel, &rdma->cm_id, NULL, RDMA_PS_TCP);
    if (ret) {
        ERROR(errp, "could not create channel id");
        goto err_resolve_create_id;
    }

    snprintf(port_str, 16, "%d", rdma->port);
    port_str[15] = '\0';

    ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
    if (ret < 0) {
        ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
        goto err_resolve_get_addr;
    }

    for (e = res; e != NULL; e = e->ai_next) {
        inet_ntop(e->ai_family,
            &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
        trace_qemu_rdma_resolve_host_trying(rdma->host, ip);

        ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr,
                RDMA_RESOLVE_TIMEOUT_MS);
        if (!ret) {
            if (e->ai_family == AF_INET6) {
                ret = qemu_rdma_broken_ipv6_kernel(rdma->cm_id->verbs, errp);
                if (ret) {
                    continue;
                }
            }
            goto route;
        }
    }

    rdma_freeaddrinfo(res);
    ERROR(errp, "could not resolve address %s", rdma->host);
    goto err_resolve_get_addr;

route:
    rdma_freeaddrinfo(res);
    qemu_rdma_dump_gid("source_resolve_addr", rdma->cm_id);

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        ERROR(errp, "could not perform event_addr_resolved");
        goto err_resolve_get_addr;
    }

    if (cm_event->event != RDMA_CM_EVENT_ADDR_RESOLVED) {
        ERROR(errp, "result not equal to event_addr_resolved %s",
                rdma_event_str(cm_event->event));
        perror("rdma_resolve_addr");
        rdma_ack_cm_event(cm_event);
        ret = -EINVAL;
        goto err_resolve_get_addr;
    }
    rdma_ack_cm_event(cm_event);

    /* resolve route */
    ret = rdma_resolve_route(rdma->cm_id, RDMA_RESOLVE_TIMEOUT_MS);
    if (ret) {
        ERROR(errp, "could not resolve rdma route");
        goto err_resolve_get_addr;
    }

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        ERROR(errp, "could not perform event_route_resolved");
        goto err_resolve_get_addr;
    }
    if (cm_event->event != RDMA_CM_EVENT_ROUTE_RESOLVED) {
        ERROR(errp, "result not equal to event_route_resolved: %s",
                rdma_event_str(cm_event->event));
        rdma_ack_cm_event(cm_event);
        ret = -EINVAL;
        goto err_resolve_get_addr;
    }
    rdma_ack_cm_event(cm_event);
    rdma->verbs = rdma->cm_id->verbs;
    qemu_rdma_dump_id("source_resolve_host", rdma->cm_id->verbs);
    qemu_rdma_dump_gid("source_resolve_host", rdma->cm_id);
    return 0;

err_resolve_get_addr:
    rdma_destroy_id(rdma->cm_id);
    rdma->cm_id = NULL;
err_resolve_create_id:
    rdma_destroy_event_channel(rdma->channel);
    rdma->channel = NULL;
    return ret;
}
/*
 * Create protection domain and completion queues
 */
static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma)
{
    /* allocate pd */
    rdma->pd = ibv_alloc_pd(rdma->verbs);
    if (!rdma->pd) {
        error_report("failed to allocate protection domain");
        return -1;
    }

    /* create completion channel */
    rdma->comp_channel = ibv_create_comp_channel(rdma->verbs);
    if (!rdma->comp_channel) {
        error_report("failed to allocate completion channel");
        goto err_alloc_pd_cq;
    }

    /*
     * Completion queue can be filled by both read and write work requests,
     * so must reflect the sum of both possible queue sizes.
     */
    rdma->cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3),
            NULL, rdma->comp_channel, 0);
    if (!rdma->cq) {
        error_report("failed to allocate completion queue");
        goto err_alloc_pd_cq;
    }

    return 0;

err_alloc_pd_cq:
    if (rdma->pd) {
        ibv_dealloc_pd(rdma->pd);
    }
    if (rdma->comp_channel) {
        ibv_destroy_comp_channel(rdma->comp_channel);
    }
    rdma->pd = NULL;
    rdma->comp_channel = NULL;
    return -1;
}
/*
 * Create queue pairs.
 */
static int qemu_rdma_alloc_qp(RDMAContext *rdma)
{
    struct ibv_qp_init_attr attr = { 0 };
    int ret;

    attr.cap.max_send_wr = RDMA_SIGNALED_SEND_MAX;
    attr.cap.max_recv_wr = 3;
    attr.cap.max_send_sge = 1;
    attr.cap.max_recv_sge = 1;
    attr.send_cq = rdma->cq;
    attr.recv_cq = rdma->cq;
    attr.qp_type = IBV_QPT_RC;

    ret = rdma_create_qp(rdma->cm_id, rdma->pd, &attr);
    if (ret) {
        return -1;
    }

    rdma->qp = rdma->cm_id->qp;
    return 0;
}
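/*
 * Sizing note (illustrative arithmetic, not a separate tunable): with
 * RDMA_MERGE_MAX of 2 MB and 4 KB pages, RDMA_SIGNALED_SEND_MAX is
 * (2 * 1024 * 1024) / 4096 == 512, so the send queue above can hold one
 * signaled work request per outstanding merged chunk, while the completion
 * queue created earlier is sized at three times that to absorb send,
 * receive and write completions together.
 */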
static int qemu_rdma_reg_whole_ram_blocks(RDMAContext *rdma)
{
    int i;
    RDMALocalBlocks *local = &rdma->local_ram_blocks;

    for (i = 0; i < local->nb_blocks; i++) {
        local->block[i].mr =
            ibv_reg_mr(rdma->pd,
                    local->block[i].local_host_addr,
                    local->block[i].length,
                    IBV_ACCESS_LOCAL_WRITE |
                    IBV_ACCESS_REMOTE_WRITE
                    );
        if (!local->block[i].mr) {
            perror("Failed to register local dest ram block!");
            break;
        }
        rdma->total_registrations++;
    }

    if (i >= local->nb_blocks) {
        return 0;
    }

    for (i--; i >= 0; i--) {
        ibv_dereg_mr(local->block[i].mr);
        rdma->total_registrations--;
    }

    return -1;
}
/*
 * Find the ram block that corresponds to the page requested to be
 * transmitted by QEMU.
 *
 * Once the block is found, also identify which 'chunk' within that
 * block that the page belongs to.
 *
 * This search cannot fail or the migration will fail.
 */
static int qemu_rdma_search_ram_block(RDMAContext *rdma,
                                      uintptr_t block_offset,
                                      uint64_t offset,
                                      uint64_t length,
                                      uint64_t *block_index,
                                      uint64_t *chunk_index)
{
    uint64_t current_addr = block_offset + offset;
    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
                                                (void *) block_offset);
    assert(block);
    assert(current_addr >= block->offset);
    assert((current_addr + length) <= (block->offset + block->length));

    *block_index = block->index;
    *chunk_index = ram_chunk_index(block->local_host_addr,
                block->local_host_addr + (current_addr - block->offset));

    return 0;
}
/*
 * Register a chunk with IB. If the chunk was already registered
 * previously, then skip.
 *
 * Also return the keys associated with the registration needed
 * to perform the actual RDMA operation.
 */
static int qemu_rdma_register_and_get_keys(RDMAContext *rdma,
                                           RDMALocalBlock *block,
                                           uintptr_t host_addr,
                                           uint32_t *lkey, uint32_t *rkey,
                                           int chunk,
                                           uint8_t *chunk_start,
                                           uint8_t *chunk_end)
{
    if (block->mr) {
        if (lkey) {
            *lkey = block->mr->lkey;
        }
        if (rkey) {
            *rkey = block->mr->rkey;
        }
        return 0;
    }

    /* allocate memory to store chunk MRs */
    if (!block->pmr) {
        block->pmr = g_new0(struct ibv_mr *, block->nb_chunks);
    }

    /*
     * If 'rkey', then we're the destination, so grant access to the source.
     *
     * If 'lkey', then we're the source VM, so grant access only to ourselves.
     */
    if (!block->pmr[chunk]) {
        uint64_t len = chunk_end - chunk_start;

        trace_qemu_rdma_register_and_get_keys(len, chunk_start);

        block->pmr[chunk] = ibv_reg_mr(rdma->pd,
                chunk_start, len,
                (rkey ? (IBV_ACCESS_LOCAL_WRITE |
                        IBV_ACCESS_REMOTE_WRITE) : 0));

        if (!block->pmr[chunk]) {
            perror("Failed to register chunk!");
            fprintf(stderr, "Chunk details: block: %d chunk index %d"
                            " start %" PRIuPTR " end %" PRIuPTR
                            " host %" PRIuPTR
                            " local %" PRIuPTR " registrations: %d\n",
                            block->index, chunk, (uintptr_t)chunk_start,
                            (uintptr_t)chunk_end, host_addr,
                            (uintptr_t)block->local_host_addr,
                            rdma->total_registrations);
            return -1;
        }
        rdma->total_registrations++;
    }

    if (lkey) {
        *lkey = block->pmr[chunk]->lkey;
    }
    if (rkey) {
        *rkey = block->pmr[chunk]->rkey;
    }
    return 0;
}
/*
 * Register (at connection time) the memory used for control
 * channels.
 */
static int qemu_rdma_reg_control(RDMAContext *rdma, int idx)
{
    rdma->wr_data[idx].control_mr = ibv_reg_mr(rdma->pd,
            rdma->wr_data[idx].control, RDMA_CONTROL_MAX_BUFFER,
            IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
    if (rdma->wr_data[idx].control_mr) {
        rdma->total_registrations++;
        return 0;
    }
    error_report("qemu_rdma_reg_control failed");
    return -1;
}
const char *print_wrid(int wrid)
{
    if (wrid >= RDMA_WRID_RECV_CONTROL) {
        return wrid_desc[RDMA_WRID_RECV_CONTROL];
    }
    return wrid_desc[wrid];
}
/*
 * RDMA requires memory registration (mlock/pinning), but this is not good for
 * overcommitment.
 *
 * In preparation for the future where LRU information or workload-specific
 * writable working set memory access behavior is available to QEMU,
 * it would be nice to have in place the ability to UN-register/UN-pin
 * particular memory regions from the RDMA hardware when it is determined that
 * those regions of memory will likely not be accessed again in the near
 * future.
 *
 * While we do not yet have such information right now, the following
 * compile-time option allows us to perform a non-optimized version of this
 * behavior.
 *
 * By uncommenting this option, you will cause *all* RDMA transfers to be
 * unregistered immediately after the transfer completes on both sides of the
 * connection. This has no effect in 'rdma-pin-all' mode, only regular mode.
 *
 * This will have a terrible impact on migration performance, so until future
 * workload information or LRU information is available, do not attempt to use
 * this feature except for basic testing.
 */
/* #define RDMA_UNREGISTRATION_EXAMPLE */
/*
 * Perform a non-optimized memory unregistration after every transfer
 * for demonstration purposes, only if pin-all is not requested.
 *
 * Potential optimizations:
 * 1. Start a new thread to run this function continuously
        - for bit clearing
        - and for receipt of unregister messages
 * 2. Use an LRU.
 * 3. Use workload hints.
 */
static int qemu_rdma_unregister_waiting(RDMAContext *rdma)
{
    while (rdma->unregistrations[rdma->unregister_current]) {
        int ret;
        uint64_t wr_id = rdma->unregistrations[rdma->unregister_current];
        uint64_t chunk =
            (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
        uint64_t index =
            (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
        RDMALocalBlock *block =
            &(rdma->local_ram_blocks.block[index]);
        RDMARegister reg = { .current_index = index };
        RDMAControlHeader resp = { .type = RDMA_CONTROL_UNREGISTER_FINISHED,
                                 };
        RDMAControlHeader head = { .len = sizeof(RDMARegister),
                                   .type = RDMA_CONTROL_UNREGISTER_REQUEST,
                                   .repeat = 1,
                                 };

        trace_qemu_rdma_unregister_waiting_proc(chunk,
                                                rdma->unregister_current);

        rdma->unregistrations[rdma->unregister_current] = 0;
        rdma->unregister_current++;

        if (rdma->unregister_current == RDMA_SIGNALED_SEND_MAX) {
            rdma->unregister_current = 0;
        }

        /*
         * Unregistration is speculative (because migration is single-threaded
         * and we cannot break the protocol's InfiniBand message ordering).
         * Thus, if the memory is currently being used for transmission,
         * then abort the attempt to unregister and try again
         * later the next time a completion is received for this memory.
         */
        clear_bit(chunk, block->unregister_bitmap);

        if (test_bit(chunk, block->transit_bitmap)) {
            trace_qemu_rdma_unregister_waiting_inflight(chunk);
            continue;
        }

        trace_qemu_rdma_unregister_waiting_send(chunk);

        ret = ibv_dereg_mr(block->pmr[chunk]);
        block->pmr[chunk] = NULL;
        block->remote_keys[chunk] = 0;

        if (ret != 0) {
            perror("unregistration chunk failed");
            return -ret;
        }
        rdma->total_registrations--;

        reg.key.chunk = chunk;
        register_to_network(rdma, &reg);
        ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
                                &resp, NULL, NULL);
        if (ret < 0) {
            return ret;
        }

        trace_qemu_rdma_unregister_waiting_complete(chunk);
    }

    return 0;
}
static uint64_t qemu_rdma_make_wrid(uint64_t wr_id, uint64_t index,
                                    uint64_t chunk)
{
    uint64_t result = wr_id & RDMA_WRID_TYPE_MASK;

    result |= (index << RDMA_WRID_BLOCK_SHIFT);
    result |= (chunk << RDMA_WRID_CHUNK_SHIFT);

    return result;
}
/*
 * Set bit for unregistration in the next iteration.
 * We cannot transmit right here, but will unpin later.
 */
static void qemu_rdma_signal_unregister(RDMAContext *rdma, uint64_t index,
                                        uint64_t chunk, uint64_t wr_id)
{
    if (rdma->unregistrations[rdma->unregister_next] != 0) {
        error_report("rdma migration: queue is full");
    } else {
        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);

        if (!test_and_set_bit(chunk, block->unregister_bitmap)) {
            trace_qemu_rdma_signal_unregister_append(chunk,
                                                     rdma->unregister_next);

            rdma->unregistrations[rdma->unregister_next++] =
                    qemu_rdma_make_wrid(wr_id, index, chunk);

            if (rdma->unregister_next == RDMA_SIGNALED_SEND_MAX) {
                rdma->unregister_next = 0;
            }
        } else {
            trace_qemu_rdma_signal_unregister_already(chunk);
        }
    }
}
/*
 * Consult the connection manager to see if a work request
 * (of any kind) has completed.
 * Return the work request ID that completed.
 */
static uint64_t qemu_rdma_poll(RDMAContext *rdma, uint64_t *wr_id_out,
                               uint32_t *byte_len)
{
    int ret;
    struct ibv_wc wc;
    uint64_t wr_id;

    ret = ibv_poll_cq(rdma->cq, 1, &wc);

    if (!ret) {
        *wr_id_out = RDMA_WRID_NONE;
        return 0;
    }

    if (ret < 0) {
        error_report("ibv_poll_cq return %d", ret);
        return ret;
    }

    wr_id = wc.wr_id & RDMA_WRID_TYPE_MASK;

    if (wc.status != IBV_WC_SUCCESS) {
        fprintf(stderr, "ibv_poll_cq wc.status=%d %s!\n",
                        wc.status, ibv_wc_status_str(wc.status));
        fprintf(stderr, "ibv_poll_cq wrid=%s!\n", wrid_desc[wr_id]);

        return -1;
    }

    if (rdma->control_ready_expected &&
        (wr_id >= RDMA_WRID_RECV_CONTROL)) {
        trace_qemu_rdma_poll_recv(wrid_desc[RDMA_WRID_RECV_CONTROL],
                  wr_id - RDMA_WRID_RECV_CONTROL, wr_id, rdma->nb_sent);
        rdma->control_ready_expected = 0;
    }

    if (wr_id == RDMA_WRID_RDMA_WRITE) {
        uint64_t chunk =
            (wc.wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
        uint64_t index =
            (wc.wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);

        trace_qemu_rdma_poll_write(print_wrid(wr_id), wr_id, rdma->nb_sent,
                                   index, chunk, block->local_host_addr,
                                   (void *)(uintptr_t)block->remote_host_addr);

        clear_bit(chunk, block->transit_bitmap);

        if (rdma->nb_sent > 0) {
            rdma->nb_sent--;
        }

        if (!rdma->pin_all) {
            /*
             * FYI: If one wanted to signal a specific chunk to be unregistered
             * using LRU or workload-specific information, this is the function
             * you would call to do so. That chunk would then get asynchronously
             * unregistered later.
             */
#ifdef RDMA_UNREGISTRATION_EXAMPLE
            qemu_rdma_signal_unregister(rdma, index, chunk, wc.wr_id);
#endif
        }
    } else {
        trace_qemu_rdma_poll_other(print_wrid(wr_id), wr_id, rdma->nb_sent);
    }

    *wr_id_out = wc.wr_id;
    if (byte_len) {
        *byte_len = wc.byte_len;
    }

    return 0;
}
/* Wait for activity on the completion channel.
 * Returns 0 on success, non-0 on error.
 */
static int qemu_rdma_wait_comp_channel(RDMAContext *rdma)
{
    struct rdma_cm_event *cm_event;
    int ret = -1;

    /*
     * Coroutine doesn't start until migration_fd_process_incoming()
     * so don't yield unless we know we're running inside of a coroutine.
     */
    if (rdma->migration_started_on_destination &&
        migration_incoming_get_current()->state == MIGRATION_STATUS_ACTIVE) {
        yield_until_fd_readable(rdma->comp_channel->fd);
    } else {
        /* This is the source side, we're in a separate thread
         * or destination prior to migration_fd_process_incoming()
         * after postcopy, the destination also in a separate thread.
         * we can't yield; so we have to poll the fd.
         * But we need to be able to handle 'cancel' or an error
         * without hanging forever.
         */
        while (!rdma->error_state && !rdma->received_error) {
            GPollFD pfds[2];
            pfds[0].fd = rdma->comp_channel->fd;
            pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;
            pfds[0].revents = 0;

            pfds[1].fd = rdma->channel->fd;
            pfds[1].events = G_IO_IN | G_IO_HUP | G_IO_ERR;
            pfds[1].revents = 0;

            /* 0.1s timeout, should be fine for a 'cancel' */
            switch (qemu_poll_ns(pfds, 2, 100 * 1000 * 1000)) {
            case 2:
            case 1: /* fd active */
                if (pfds[0].revents) {
                    return 0;
                }

                if (pfds[1].revents) {
                    ret = rdma_get_cm_event(rdma->channel, &cm_event);
                    if (!ret) {
                        rdma_ack_cm_event(cm_event);
                    }

                    error_report("received cm event while waiting on comp "
                                 "channel, cm event is %d", cm_event->event);
                    if (cm_event->event == RDMA_CM_EVENT_DISCONNECTED ||
                        cm_event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
                        return -EPIPE;
                    }
                }
                break;

            case 0: /* Timeout, go around again */
                break;

            default: /* Error of some type -
                      * I don't trust errno from qemu_poll_ns
                      */
                error_report("%s: poll failed", __func__);
                return -EPIPE;
            }

            if (migrate_get_current()->state == MIGRATION_STATUS_CANCELLING) {
                /* Bail out and let the cancellation happen */
                return -EPIPE;
            }
        }
    }

    if (rdma->received_error) {
        return -EPIPE;
    }
    return rdma->error_state;
}
/*
 * Block until the next work request has completed.
 *
 * First poll to see if a work request has already completed,
 * otherwise block.
 *
 * If we encounter completed work requests for IDs other than
 * the one we're interested in, then that's generally an error.
 *
 * The only exception is actual RDMA Write completions. These
 * completions only need to be recorded, but do not actually
 * need further processing.
 */
static int qemu_rdma_block_for_wrid(RDMAContext *rdma, int wrid_requested,
                                    uint32_t *byte_len)
{
    int num_cq_events = 0, ret = 0;
    struct ibv_cq *cq;
    void *cq_ctx;
    uint64_t wr_id = RDMA_WRID_NONE, wr_id_in;

    if (ibv_req_notify_cq(rdma->cq, 0)) {
        return -1;
    }
    /* poll cq first */
    while (wr_id != wrid_requested) {
        ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len);
        if (ret < 0) {
            return ret;
        }

        wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;

        if (wr_id == RDMA_WRID_NONE) {
            break;
        }
        if (wr_id != wrid_requested) {
            trace_qemu_rdma_block_for_wrid_miss(print_wrid(wrid_requested),
                       wrid_requested, print_wrid(wr_id), wr_id);
        }
    }

    if (wr_id == wrid_requested) {
        return 0;
    }

    while (1) {
        ret = qemu_rdma_wait_comp_channel(rdma);
        if (ret) {
            goto err_block_for_wrid;
        }

        ret = ibv_get_cq_event(rdma->comp_channel, &cq, &cq_ctx);
        if (ret) {
            perror("ibv_get_cq_event");
            goto err_block_for_wrid;
        }

        num_cq_events++;

        ret = -ibv_req_notify_cq(cq, 0);
        if (ret) {
            goto err_block_for_wrid;
        }

        while (wr_id != wrid_requested) {
            ret = qemu_rdma_poll(rdma, &wr_id_in, byte_len);
            if (ret < 0) {
                goto err_block_for_wrid;
            }

            wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;

            if (wr_id == RDMA_WRID_NONE) {
                break;
            }
            if (wr_id != wrid_requested) {
                trace_qemu_rdma_block_for_wrid_miss(print_wrid(wrid_requested),
                                   wrid_requested, print_wrid(wr_id), wr_id);
            }
        }

        if (wr_id == wrid_requested) {
            goto success_block_for_wrid;
        }
    }

success_block_for_wrid:
    if (num_cq_events) {
        ibv_ack_cq_events(cq, num_cq_events);
    }
    return 0;

err_block_for_wrid:
    if (num_cq_events) {
        ibv_ack_cq_events(cq, num_cq_events);
    }

    rdma->error_state = ret;
    return ret;
}
/*
 * Post a SEND message work request for the control channel
 * containing some data and block until the post completes.
 */
static int qemu_rdma_post_send_control(RDMAContext *rdma, uint8_t *buf,
                                       RDMAControlHeader *head)
{
    int ret = 0;
    RDMAWorkRequestData *wr = &rdma->wr_data[RDMA_WRID_CONTROL];
    struct ibv_send_wr *bad_wr;
    struct ibv_sge sge = {
                           .addr = (uintptr_t)(wr->control),
                           .length = head->len + sizeof(RDMAControlHeader),
                           .lkey = wr->control_mr->lkey,
                         };
    struct ibv_send_wr send_wr = {
                                   .wr_id = RDMA_WRID_SEND_CONTROL,
                                   .opcode = IBV_WR_SEND,
                                   .send_flags = IBV_SEND_SIGNALED,
                                   .sg_list = &sge,
                                   .num_sge = 1,
                                };

    trace_qemu_rdma_post_send_control(control_desc(head->type));

    /*
     * We don't actually need to do a memcpy() in here if we used
     * the "sge" properly, but since we're only sending control messages
     * (not RAM in a performance-critical path), then it's OK for now.
     *
     * The copy makes the RDMAControlHeader simpler to manipulate
     * for the time being.
     */
    assert(head->len <= RDMA_CONTROL_MAX_BUFFER - sizeof(*head));
    memcpy(wr->control, head, sizeof(RDMAControlHeader));
    control_to_network((void *) wr->control);

    if (buf) {
        memcpy(wr->control + sizeof(RDMAControlHeader), buf, head->len);
    }

    ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);

    if (ret > 0) {
        error_report("Failed to post IB SEND for control");
        return -ret;
    }

    ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_SEND_CONTROL, NULL);
    if (ret < 0) {
        error_report("rdma migration: send polling control error");
    }

    return ret;
}
/*
 * Post a RECV work request in anticipation of some future receipt
 * of data on the control channel.
 */
static int qemu_rdma_post_recv_control(RDMAContext *rdma, int idx)
{
    struct ibv_recv_wr *bad_wr;
    struct ibv_sge sge = {
                            .addr = (uintptr_t)(rdma->wr_data[idx].control),
                            .length = RDMA_CONTROL_MAX_BUFFER,
                            .lkey = rdma->wr_data[idx].control_mr->lkey,
                         };

    struct ibv_recv_wr recv_wr = {
                                    .wr_id = RDMA_WRID_RECV_CONTROL + idx,
                                    .sg_list = &sge,
                                    .num_sge = 1,
                                 };

    if (ibv_post_recv(rdma->qp, &recv_wr, &bad_wr)) {
        return -1;
    }

    return 0;
}
/*
 * Block and wait for a RECV control channel message to arrive.
 */
static int qemu_rdma_exchange_get_response(RDMAContext *rdma,
                RDMAControlHeader *head, int expecting, int idx)
{
    uint32_t byte_len;
    int ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RECV_CONTROL + idx,
                                       &byte_len);

    if (ret < 0) {
        error_report("rdma migration: recv polling control error!");
        return ret;
    }

    network_to_control((void *) rdma->wr_data[idx].control);
    memcpy(head, rdma->wr_data[idx].control, sizeof(RDMAControlHeader));

    trace_qemu_rdma_exchange_get_response_start(control_desc(expecting));

    if (expecting == RDMA_CONTROL_NONE) {
        trace_qemu_rdma_exchange_get_response_none(control_desc(head->type),
                                             head->type);
    } else if (head->type != expecting || head->type == RDMA_CONTROL_ERROR) {
        error_report("Was expecting a %s (%d) control message"
                ", but got: %s (%d), length: %d",
                control_desc(expecting), expecting,
                control_desc(head->type), head->type, head->len);
        if (head->type == RDMA_CONTROL_ERROR) {
            rdma->received_error = true;
        }
        return -EIO;
    }
    if (head->len > RDMA_CONTROL_MAX_BUFFER - sizeof(*head)) {
        error_report("too long length: %d", head->len);
        return -EINVAL;
    }
    if (sizeof(*head) + head->len != byte_len) {
        error_report("Malformed length: %d byte_len %d", head->len, byte_len);
        return -EINVAL;
    }

    return 0;
}
/*
 * When a RECV work request has completed, the work request's
 * buffer is pointed at the header.
 *
 * This will advance the pointer to the data portion
 * of the control message of the work request's buffer that
 * was populated after the work request finished.
 */
static void qemu_rdma_move_header(RDMAContext *rdma, int idx,
                                  RDMAControlHeader *head)
{
    rdma->wr_data[idx].control_len = head->len;
    rdma->wr_data[idx].control_curr =
        rdma->wr_data[idx].control + sizeof(RDMAControlHeader);
}
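/*
 * Buffer layout after a completed RECV (illustrative only):
 *
 *   wr_data[idx].control:  [ RDMAControlHeader | data bytes ... ]
 *                                                ^ control_curr
 *
 * control_len holds head->len, i.e. the length of the data portion only,
 * not including the header itself.
 */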
/*
 * This is an 'atomic' high-level operation to deliver a single, unified
 * control-channel message.
 *
 * Additionally, if the user is expecting some kind of reply to this message,
 * they can request a 'resp' response message be filled in by posting an
 * additional work request on behalf of the user and waiting for an additional
 * completion.
 *
 * The extra (optional) response saves us from having to perform an
 * *additional* exchange of messages just to provide a response;
 * instead, the response piggy-backs on the acknowledgement.
 */
static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
                                   uint8_t *data, RDMAControlHeader *resp,
                                   int *resp_idx,
                                   int (*callback)(RDMAContext *rdma))
{
    int ret = 0;

    /*
     * Wait until the dest is ready before attempting to deliver the message
     * by waiting for a READY message.
     */
    if (rdma->control_ready_expected) {
        RDMAControlHeader resp;
        ret = qemu_rdma_exchange_get_response(rdma,
                                    &resp, RDMA_CONTROL_READY, RDMA_WRID_READY);
        if (ret < 0) {
            return ret;
        }
    }

    /*
     * If the user is expecting a response, post a WR in anticipation of it.
     */
    if (resp) {
        ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_DATA);
        if (ret) {
            error_report("rdma migration: error posting"
                    " extra control recv for anticipated result!");
            return ret;
        }
    }

    /*
     * Post a WR to replace the one we just consumed for the READY message.
     */
    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
    if (ret) {
        error_report("rdma migration: error posting first control recv!");
        return ret;
    }

    /*
     * Deliver the control message that was requested.
     */
    ret = qemu_rdma_post_send_control(rdma, data, head);

    if (ret < 0) {
        error_report("Failed to send control buffer!");
        return ret;
    }

    /*
     * If we're expecting a response, block and wait for it.
     */
    if (resp) {
        if (callback) {
            trace_qemu_rdma_exchange_send_issue_callback();
            ret = callback(rdma);
            if (ret < 0) {
                return ret;
            }
        }

        trace_qemu_rdma_exchange_send_waiting(control_desc(resp->type));
        ret = qemu_rdma_exchange_get_response(rdma, resp,
                                              resp->type, RDMA_WRID_DATA);

        if (ret < 0) {
            return ret;
        }

        qemu_rdma_move_header(rdma, RDMA_WRID_DATA, resp);
        if (resp_idx) {
            *resp_idx = RDMA_WRID_DATA;
        }
        trace_qemu_rdma_exchange_send_received(control_desc(resp->type));
    }

    rdma->control_ready_expected = 1;

    return 0;
}
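/*
 * Message flow sketch (illustrative only), pairing this function with
 * qemu_rdma_exchange_recv() on the other side:
 *
 *   source                                 destination
 *   ------                                 -----------
 *   wait for READY            <----------  post_send_control(READY)
 *   post_send_control(head)   ---------->  wait for 'expecting' message
 *   [optional] wait for resp  <----------  [optional] post_send_control(resp)
 *
 * Every consumed RECV work request is immediately replaced so that the
 * next incoming message always has a buffer waiting for it.
 */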
/*
 * This is an 'atomic' high-level operation to receive a single, unified
 * control-channel message.
 */
static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
                                   int expecting)
{
    RDMAControlHeader ready = {
                                .len = 0,
                                .type = RDMA_CONTROL_READY,
                                .repeat = 1,
                              };
    int ret;

    /*
     * Inform the source that we're ready to receive a message.
     */
    ret = qemu_rdma_post_send_control(rdma, NULL, &ready);

    if (ret < 0) {
        error_report("Failed to send control buffer!");
        return ret;
    }

    /*
     * Block and wait for the message.
     */
    ret = qemu_rdma_exchange_get_response(rdma, head,
                                          expecting, RDMA_WRID_READY);

    if (ret < 0) {
        return ret;
    }

    qemu_rdma_move_header(rdma, RDMA_WRID_READY, head);

    /*
     * Post a new RECV work request to replace the one we just consumed.
     */
    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
    if (ret) {
        error_report("rdma migration: error posting second control recv!");
        return ret;
    }

    return 0;
}
/*
 * Write an actual chunk of memory using RDMA.
 *
 * If we're using dynamic registration on the dest-side, we have to
 * send a registration command first.
 */
static int qemu_rdma_write_one(QEMUFile *f, RDMAContext *rdma,
                               int current_index, uint64_t current_addr,
                               uint64_t length)
{
    struct ibv_sge sge;
    struct ibv_send_wr send_wr = { 0 };
    struct ibv_send_wr *bad_wr;
    int reg_result_idx, ret, count = 0;
    uint64_t chunk, chunks;
    uint8_t *chunk_start, *chunk_end;
    RDMALocalBlock *block = &(rdma->local_ram_blocks.block[current_index]);
    RDMARegister reg;
    RDMARegisterResult *reg_result;
    RDMAControlHeader resp = { .type = RDMA_CONTROL_REGISTER_RESULT };
    RDMAControlHeader head = { .len = sizeof(RDMARegister),
                               .type = RDMA_CONTROL_REGISTER_REQUEST,
                               .repeat = 1,
                             };

retry:
    sge.addr = (uintptr_t)(block->local_host_addr +
                            (current_addr - block->offset));
    sge.length = length;

    chunk = ram_chunk_index(block->local_host_addr,
                            (uint8_t *)(uintptr_t)sge.addr);
    chunk_start = ram_chunk_start(block, chunk);

    if (block->is_ram_block) {
        chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT);

        if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
            chunks--;
        }
    } else {
        chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT);

        if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
            chunks--;
        }
    }

    trace_qemu_rdma_write_one_top(chunks + 1,
                                  (chunks + 1) *
                                  (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024);

    chunk_end = ram_chunk_end(block, chunk + chunks);

    if (!rdma->pin_all) {
#ifdef RDMA_UNREGISTRATION_EXAMPLE
        qemu_rdma_unregister_waiting(rdma);
#endif
    }

    while (test_bit(chunk, block->transit_bitmap)) {
        trace_qemu_rdma_write_one_block(count++, current_index, chunk,
                sge.addr, length, rdma->nb_sent, block->nb_chunks);

        ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);

        if (ret < 0) {
            error_report("Failed to wait for previous write to complete "
                    "block %d chunk %" PRIu64
                    " current %" PRIu64 " len %" PRIu64 " %d",
                    current_index, chunk, sge.addr, length, rdma->nb_sent);
            return ret;
        }
    }

    if (!rdma->pin_all || !block->is_ram_block) {
        if (!block->remote_keys[chunk]) {
            /*
             * This chunk has not yet been registered, so first check to see
             * if the entire chunk is zero. If so, tell the other side to
             * memset() + madvise() the entire chunk without RDMA.
             */

            if (buffer_is_zero((void *)(uintptr_t)sge.addr, length)) {
                RDMACompress comp = {
                                        .offset = current_addr,
                                        .value = 0,
                                        .block_idx = current_index,
                                        .length = length,
                                    };

                head.len = sizeof(comp);
                head.type = RDMA_CONTROL_COMPRESS;

                trace_qemu_rdma_write_one_zero(chunk, sge.length,
                                               current_index, current_addr);

                compress_to_network(rdma, &comp);
                ret = qemu_rdma_exchange_send(rdma, &head,
                                (uint8_t *) &comp, NULL, NULL, NULL);

                if (ret < 0) {
                    return -EIO;
                }

                acct_update_position(f, sge.length, true);

                return 1;
            }

            /*
             * Otherwise, tell other side to register.
             */
            reg.current_index = current_index;
            if (block->is_ram_block) {
                reg.key.current_addr = current_addr;
            } else {
                reg.key.chunk = chunk;
            }
            reg.chunks = chunks;

            trace_qemu_rdma_write_one_sendreg(chunk, sge.length, current_index,
                                              current_addr);

            register_to_network(rdma, &reg);
            ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
                                    &resp, &reg_result_idx, NULL);
            if (ret < 0) {
                return ret;
            }

            /* try to overlap this single registration with the one we sent. */
            if (qemu_rdma_register_and_get_keys(rdma, block, sge.addr,
                                                &sge.lkey, NULL, chunk,
                                                chunk_start, chunk_end)) {
                error_report("cannot get lkey");
                return -EINVAL;
            }

            reg_result = (RDMARegisterResult *)
                    rdma->wr_data[reg_result_idx].control_curr;

            network_to_result(reg_result);

            trace_qemu_rdma_write_one_recvregres(block->remote_keys[chunk],
                                                 reg_result->rkey, chunk);

            block->remote_keys[chunk] = reg_result->rkey;
            block->remote_host_addr = reg_result->host_addr;
        } else {
            /* already registered before */
            if (qemu_rdma_register_and_get_keys(rdma, block, sge.addr,
                                                &sge.lkey, NULL, chunk,
                                                chunk_start, chunk_end)) {
                error_report("cannot get lkey!");
                return -EINVAL;
            }
        }

        send_wr.wr.rdma.rkey = block->remote_keys[chunk];
    } else {
        send_wr.wr.rdma.rkey = block->remote_rkey;

        if (qemu_rdma_register_and_get_keys(rdma, block, sge.addr,
                                            &sge.lkey, NULL, chunk,
                                            chunk_start, chunk_end)) {
            error_report("cannot get lkey!");
            return -EINVAL;
        }
    }

    /*
     * Encode the ram block index and chunk within this wrid.
     * We will use this information at the time of completion
     * to figure out which bitmap to check against and then which
     * chunk in the bitmap to look for.
     */
    send_wr.wr_id = qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE,
                                        current_index, chunk);

    send_wr.opcode = IBV_WR_RDMA_WRITE;
    send_wr.send_flags = IBV_SEND_SIGNALED;
    send_wr.sg_list = &sge;
    send_wr.num_sge = 1;
    send_wr.wr.rdma.remote_addr = block->remote_host_addr +
                                (current_addr - block->offset);

    trace_qemu_rdma_write_one_post(chunk, sge.addr, send_wr.wr.rdma.remote_addr,
                                   sge.length);

    /*
     * ibv_post_send() does not return negative error numbers,
     * per the specification they are positive - no idea why.
     */
    ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);

    if (ret == ENOMEM) {
        trace_qemu_rdma_write_one_queue_full();
        ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
        if (ret < 0) {
            error_report("rdma migration: failed to make "
                         "room in full send queue! %d", ret);
            return ret;
        }

        goto retry;

    } else if (ret > 0) {
        perror("rdma migration: post rdma write failed");
        return -ret;
    }

    set_bit(chunk, block->transit_bitmap);
    acct_update_position(f, sge.length, false);
    rdma->total_writes++;

    return 0;
}
/*
 * Push out any unwritten RDMA operations.
 *
 * We support sending out multiple chunks at the same time.
 * Not all of them need to get signaled in the completion queue.
 */
static int qemu_rdma_write_flush(QEMUFile *f, RDMAContext *rdma)
{
    int ret;

    if (!rdma->current_length) {
        return 0;
    }

    ret = qemu_rdma_write_one(f, rdma,
            rdma->current_index, rdma->current_addr, rdma->current_length);

    if (ret < 0) {
        return ret;
    }

    if (ret == 0) {
        rdma->nb_sent++;
        trace_qemu_rdma_write_flush(rdma->nb_sent);
    }

    rdma->current_length = 0;
    rdma->current_addr = 0;

    return 0;
}

static inline int qemu_rdma_buffer_mergable(RDMAContext *rdma,
                    uint64_t offset, uint64_t len)
{
    RDMALocalBlock *block;
    uint8_t *host_addr;
    uint8_t *chunk_end;

    if (rdma->current_index < 0) {
        return 0;
    }

    if (rdma->current_chunk < 0) {
        return 0;
    }

    block = &(rdma->local_ram_blocks.block[rdma->current_index]);
    host_addr = block->local_host_addr + (offset - block->offset);
    chunk_end = ram_chunk_end(block, rdma->current_chunk);

    if (rdma->current_length == 0) {
        return 0;
    }

    /*
     * Only merge into chunk sequentially.
     */
    if (offset != (rdma->current_addr + rdma->current_length)) {
        return 0;
    }

    if (offset < block->offset) {
        return 0;
    }

    if ((offset + len) > (block->offset + block->length)) {
        return 0;
    }

    if ((host_addr + len) > chunk_end) {
        return 0;
    }

    return 1;
}

/*
 * We're not actually writing here, but doing three things:
 *
 * 1. Identify the chunk the buffer belongs to.
 * 2. If the chunk is full or the buffer doesn't belong to the current
 *    chunk, then start a new chunk and flush() the old chunk.
 * 3. To keep the hardware busy, we also group chunks into batches
 *    and only require that a batch gets acknowledged in the completion
 *    queue instead of each individual chunk.
 */
static int qemu_rdma_write(QEMUFile *f, RDMAContext *rdma,
                           uint64_t block_offset, uint64_t offset,
                           uint64_t len)
{
    uint64_t current_addr = block_offset + offset;
    uint64_t index = rdma->current_index;
    uint64_t chunk = rdma->current_chunk;
    int ret;

    /* If we cannot merge it, we flush the current buffer first. */
    if (!qemu_rdma_buffer_mergable(rdma, current_addr, len)) {
        ret = qemu_rdma_write_flush(f, rdma);
        if (ret) {
            return ret;
        }
        rdma->current_length = 0;
        rdma->current_addr = current_addr;

        ret = qemu_rdma_search_ram_block(rdma, block_offset,
                                         offset, len, &index, &chunk);
        if (ret) {
            error_report("ram block search failed");
            return ret;
        }
        rdma->current_index = index;
        rdma->current_chunk = chunk;
    }

    /* merge it */
    rdma->current_length += len;

    /* flush it if buffer is too large */
    if (rdma->current_length >= RDMA_MERGE_MAX) {
        return qemu_rdma_write_flush(f, rdma);
    }

    return 0;
}
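
/*
 * Illustrative merge behaviour (not a separate code path): a stream of
 * sequential 4KB dirty pages is accumulated into rdma->current_length
 * and sent as one RDMA write when the current 1MB chunk boundary is hit
 * (qemu_rdma_buffer_mergable() fails) or, as a backstop, when the
 * buffer reaches RDMA_MERGE_MAX.
 */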

static void qemu_rdma_cleanup(RDMAContext *rdma)
{
    int idx;

    if (rdma->cm_id && rdma->connected) {
        if ((rdma->error_state ||
             migrate_get_current()->state == MIGRATION_STATUS_CANCELLING) &&
            !rdma->received_error) {
            RDMAControlHeader head = { .len = 0,
                                       .type = RDMA_CONTROL_ERROR,
                                       .repeat = 1,
                                     };
            error_report("Early error. Sending error.");
            qemu_rdma_post_send_control(rdma, NULL, &head);
        }

        rdma_disconnect(rdma->cm_id);
        trace_qemu_rdma_cleanup_disconnect();
        rdma->connected = false;
    }

    if (rdma->channel) {
        qemu_set_fd_handler(rdma->channel->fd, NULL, NULL, NULL);
    }
    g_free(rdma->dest_blocks);
    rdma->dest_blocks = NULL;

    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
        if (rdma->wr_data[idx].control_mr) {
            rdma->total_registrations--;
            ibv_dereg_mr(rdma->wr_data[idx].control_mr);
        }
        rdma->wr_data[idx].control_mr = NULL;
    }

    if (rdma->local_ram_blocks.block) {
        while (rdma->local_ram_blocks.nb_blocks) {
            rdma_delete_block(rdma, &rdma->local_ram_blocks.block[0]);
        }
    }

    if (rdma->qp) {
        rdma_destroy_qp(rdma->cm_id);
        rdma->qp = NULL;
    }
    if (rdma->cq) {
        ibv_destroy_cq(rdma->cq);
        rdma->cq = NULL;
    }
    if (rdma->comp_channel) {
        ibv_destroy_comp_channel(rdma->comp_channel);
        rdma->comp_channel = NULL;
    }
    if (rdma->pd) {
        ibv_dealloc_pd(rdma->pd);
        rdma->pd = NULL;
    }
    if (rdma->cm_id) {
        rdma_destroy_id(rdma->cm_id);
        rdma->cm_id = NULL;
    }

    /* the destination side, listen_id and channel is shared */
    if (rdma->listen_id) {
        if (!rdma->is_return_path) {
            rdma_destroy_id(rdma->listen_id);
        }
        rdma->listen_id = NULL;

        if (rdma->channel) {
            if (!rdma->is_return_path) {
                rdma_destroy_event_channel(rdma->channel);
            }
            rdma->channel = NULL;
        }
    }

    if (rdma->channel) {
        rdma_destroy_event_channel(rdma->channel);
        rdma->channel = NULL;
    }
    g_free(rdma->host);
    rdma->host = NULL;
}

static int qemu_rdma_source_init(RDMAContext *rdma, bool pin_all, Error **errp)
{
    int ret, idx;
    Error *local_err = NULL, **temp = &local_err;

    /*
     * Will be validated against destination's actual capabilities
     * after the connect() completes.
     */
    rdma->pin_all = pin_all;

    ret = qemu_rdma_resolve_host(rdma, temp);
    if (ret) {
        goto err_rdma_source_init;
    }

    ret = qemu_rdma_alloc_pd_cq(rdma);
    if (ret) {
        ERROR(temp, "rdma migration: error allocating pd and cq! Your mlock()"
                    " limits may be too low. Please check $ ulimit -a # and "
                    "search for 'ulimit -l' in the output");
        goto err_rdma_source_init;
    }

    ret = qemu_rdma_alloc_qp(rdma);
    if (ret) {
        ERROR(temp, "rdma migration: error allocating qp!");
        goto err_rdma_source_init;
    }

    ret = qemu_rdma_init_ram_blocks(rdma);
    if (ret) {
        ERROR(temp, "rdma migration: error initializing ram blocks!");
        goto err_rdma_source_init;
    }

    /* Build the hash that maps from offset to RAMBlock */
    rdma->blockmap = g_hash_table_new(g_direct_hash, g_direct_equal);
    for (idx = 0; idx < rdma->local_ram_blocks.nb_blocks; idx++) {
        g_hash_table_insert(rdma->blockmap,
                (void *)(uintptr_t)rdma->local_ram_blocks.block[idx].offset,
                &rdma->local_ram_blocks.block[idx]);
    }

    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
        ret = qemu_rdma_reg_control(rdma, idx);
        if (ret) {
            ERROR(temp, "rdma migration: error registering %d control!",
                        idx);
            goto err_rdma_source_init;
        }
    }

    return 0;

err_rdma_source_init:
    error_propagate(errp, local_err);
    qemu_rdma_cleanup(rdma);
    return -1;
}

static int qemu_rdma_connect(RDMAContext *rdma, Error **errp)
{
    RDMACapabilities cap = {
                                .version = RDMA_CONTROL_VERSION_CURRENT,
                                .flags = 0,
                           };
    struct rdma_conn_param conn_param = { .initiator_depth = 2,
                                          .retry_count = 5,
                                          .private_data = &cap,
                                          .private_data_len = sizeof(cap),
                                        };
    struct rdma_cm_event *cm_event;
    int ret;

    /*
     * Only negotiate the capability with destination if the user
     * on the source first requested the capability.
     */
    if (rdma->pin_all) {
        trace_qemu_rdma_connect_pin_all_requested();
        cap.flags |= RDMA_CAPABILITY_PIN_ALL;
    }

    caps_to_network(&cap);

    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
    if (ret) {
        ERROR(errp, "posting second control recv");
        goto err_rdma_source_connect;
    }

    ret = rdma_connect(rdma->cm_id, &conn_param);
    if (ret) {
        perror("rdma_connect");
        ERROR(errp, "connecting to destination!");
        goto err_rdma_source_connect;
    }

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        perror("rdma_get_cm_event after rdma_connect");
        ERROR(errp, "connecting to destination!");
        goto err_rdma_source_connect;
    }

    if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
        perror("rdma_get_cm_event != EVENT_ESTABLISHED after rdma_connect");
        ERROR(errp, "connecting to destination!");
        rdma_ack_cm_event(cm_event);
        goto err_rdma_source_connect;
    }
    rdma->connected = true;

    memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));
    network_to_caps(&cap);

    /*
     * Verify that the *requested* capabilities are supported by the
     * destination and disable them otherwise.
     */
    if (rdma->pin_all && !(cap.flags & RDMA_CAPABILITY_PIN_ALL)) {
        ERROR(errp, "Server cannot support pinning all memory. "
                    "Will register memory dynamically.");
        rdma->pin_all = false;
    }

    trace_qemu_rdma_connect_pin_all_outcome(rdma->pin_all);

    rdma_ack_cm_event(cm_event);

    rdma->control_ready_expected = 1;
    return 0;

err_rdma_source_connect:
    qemu_rdma_cleanup(rdma);
    return -1;
}
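
/*
 * Capability negotiation example: if the source was started with the
 * rdma-pin-all capability, cap.flags carries RDMA_CAPABILITY_PIN_ALL in
 * the private_data of the connect request. The destination masks those
 * flags with known_capabilities in qemu_rdma_accept() below, so a
 * destination that cannot pin everything simply clears the bit and the
 * source falls back to dynamic chunk registration here.
 */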

static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
{
    int ret, idx;
    struct rdma_cm_id *listen_id;
    char ip[40] = "unknown";
    struct rdma_addrinfo *res, *e;
    char port_str[16];

    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
        rdma->wr_data[idx].control_len = 0;
        rdma->wr_data[idx].control_curr = NULL;
    }

    if (!rdma->host || !rdma->host[0]) {
        ERROR(errp, "RDMA host is not set!");
        rdma->error_state = -EINVAL;
        return -1;
    }
    /* create CM channel */
    rdma->channel = rdma_create_event_channel();
    if (!rdma->channel) {
        ERROR(errp, "could not create rdma event channel");
        rdma->error_state = -EINVAL;
        return -1;
    }

    /* create CM id */
    ret = rdma_create_id(rdma->channel, &listen_id, NULL, RDMA_PS_TCP);
    if (ret) {
        ERROR(errp, "could not create cm_id!");
        goto err_dest_init_create_listen_id;
    }

    snprintf(port_str, 16, "%d", rdma->port);
    port_str[15] = '\0';

    ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
    if (ret < 0) {
        ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
        goto err_dest_init_bind_addr;
    }

    for (e = res; e != NULL; e = e->ai_next) {
        inet_ntop(e->ai_family,
            &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
        trace_qemu_rdma_dest_init_trying(rdma->host, ip);
        ret = rdma_bind_addr(listen_id, e->ai_dst_addr);
        if (ret) {
            continue;
        }
        if (e->ai_family == AF_INET6) {
            ret = qemu_rdma_broken_ipv6_kernel(listen_id->verbs, errp);
            if (ret) {
                continue;
            }
        }
        break;
    }

    rdma_freeaddrinfo(res);
    if (!e) {
        ERROR(errp, "Error: could not rdma_bind_addr!");
        goto err_dest_init_bind_addr;
    }

    rdma->listen_id = listen_id;
    qemu_rdma_dump_gid("dest_init", listen_id);
    return 0;

err_dest_init_bind_addr:
    rdma_destroy_id(listen_id);
err_dest_init_create_listen_id:
    rdma_destroy_event_channel(rdma->channel);
    rdma->channel = NULL;
    rdma->error_state = ret;
    return ret;
}

static void qemu_rdma_return_path_dest_init(RDMAContext *rdma_return_path,
                                            RDMAContext *rdma)
{
    int idx;

    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
        rdma_return_path->wr_data[idx].control_len = 0;
        rdma_return_path->wr_data[idx].control_curr = NULL;
    }

    /* the CM channel and CM id is shared */
    rdma_return_path->channel = rdma->channel;
    rdma_return_path->listen_id = rdma->listen_id;

    rdma->return_path = rdma_return_path;
    rdma_return_path->return_path = rdma;
    rdma_return_path->is_return_path = true;
}

static void *qemu_rdma_data_init(const char *host_port, Error **errp)
{
    RDMAContext *rdma = NULL;
    InetSocketAddress *addr;

    if (host_port) {
        rdma = g_new0(RDMAContext, 1);
        rdma->current_index = -1;
        rdma->current_chunk = -1;

        addr = g_new(InetSocketAddress, 1);
        if (!inet_parse(addr, host_port, NULL)) {
            rdma->port = atoi(addr->port);
            rdma->host = g_strdup(addr->host);
        } else {
            ERROR(errp, "bad RDMA migration address '%s'", host_port);
            g_free(rdma);
            rdma = NULL;
        }

        qapi_free_InetSocketAddress(addr);
    }

    return rdma;
}
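
/*
 * Example (hypothetical address): qemu_rdma_data_init("192.168.1.1:4444",
 * errp) returns a zeroed RDMAContext with host "192.168.1.1" and port
 * 4444, with the current index/chunk markers set to -1, meaning "no
 * chunk open yet" for the merging logic above.
 */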

/*
 * QEMUFile interface to the control channel.
 * SEND messages for control only.
 * VM's ram is handled with regular RDMA messages.
 */
static ssize_t qio_channel_rdma_writev(QIOChannel *ioc,
                                       const struct iovec *iov,
                                       size_t niov,
                                       int *fds,
                                       size_t nfds,
                                       Error **errp)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
    QEMUFile *f = rioc->file;
    RDMAContext *rdma;
    int ret;
    ssize_t done = 0;
    size_t i;
    size_t len = 0;

    RCU_READ_LOCK_GUARD();
    rdma = qatomic_rcu_read(&rioc->rdmaout);

    if (!rdma) {
        return -EIO;
    }

    CHECK_ERROR_STATE();

    /*
     * Push out any writes that
     * we're queued up for VM's ram.
     */
    ret = qemu_rdma_write_flush(f, rdma);
    if (ret < 0) {
        rdma->error_state = ret;
        return ret;
    }

    for (i = 0; i < niov; i++) {
        size_t remaining = iov[i].iov_len;
        uint8_t * data = (void *)iov[i].iov_base;
        while (remaining) {
            RDMAControlHeader head;

            len = MIN(remaining, RDMA_SEND_INCREMENT);
            remaining -= len;

            head.len = len;
            head.type = RDMA_CONTROL_QEMU_FILE;

            ret = qemu_rdma_exchange_send(rdma, &head, data, NULL, NULL, NULL);

            if (ret < 0) {
                rdma->error_state = ret;
                return ret;
            }

            data += len;
            done += len;
        }
    }

    return done;
}
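
/*
 * Note on the loop above: control payloads larger than
 * RDMA_SEND_INCREMENT (32KB) are split into multiple
 * RDMA_CONTROL_QEMU_FILE messages, since each SEND must fit in the
 * pre-registered control buffer on the receiving side.
 */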

static size_t qemu_rdma_fill(RDMAContext *rdma, uint8_t *buf,
                             size_t size, int idx)
{
    size_t len = 0;

    if (rdma->wr_data[idx].control_len) {
        trace_qemu_rdma_fill(rdma->wr_data[idx].control_len, size);

        len = MIN(size, rdma->wr_data[idx].control_len);
        memcpy(buf, rdma->wr_data[idx].control_curr, len);
        rdma->wr_data[idx].control_curr += len;
        rdma->wr_data[idx].control_len -= len;
    }

    return len;
}
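
/*
 * Example of the buffering above: if a 16KB RDMA_CONTROL_QEMU_FILE
 * message arrives and the reader only asks for 4KB, qemu_rdma_fill()
 * hands out 4KB and leaves control_curr/control_len pointing at the
 * remaining 12KB for the next readv call.
 */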

/*
 * QEMUFile interface to the control channel.
 * RDMA links don't use bytestreams, so we have to
 * return bytes to QEMUFile opportunistically.
 */
static ssize_t qio_channel_rdma_readv(QIOChannel *ioc,
                                      const struct iovec *iov,
                                      size_t niov,
                                      int **fds,
                                      size_t *nfds,
                                      Error **errp)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
    RDMAContext *rdma;
    RDMAControlHeader head;
    int ret = 0;
    ssize_t i;
    size_t done = 0;

    RCU_READ_LOCK_GUARD();
    rdma = qatomic_rcu_read(&rioc->rdmain);

    if (!rdma) {
        return -EIO;
    }

    CHECK_ERROR_STATE();

    for (i = 0; i < niov; i++) {
        size_t want = iov[i].iov_len;
        uint8_t *data = (void *)iov[i].iov_base;

        /*
         * First, we hold on to the last SEND message we
         * were given and dish out the bytes until we run
         * out of bytes.
         */
        ret = qemu_rdma_fill(rdma, data, want, 0);
        done += ret;
        want -= ret;
        /* Got what we needed, so go to next iovec */
        if (want == 0) {
            continue;
        }

        /* If we got any data so far, then don't wait
         * for more, just return what we have */
        if (done > 0) {
            break;
        }

        /* We've got nothing at all, so lets wait for
         * more to arrive
         */
        ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_QEMU_FILE);

        if (ret < 0) {
            rdma->error_state = ret;
            return ret;
        }

        /*
         * SEND was received with new bytes, now try again.
         */
        ret = qemu_rdma_fill(rdma, data, want, 0);
        done += ret;
        want -= ret;

        /* Still didn't get enough, so lets just return */
        if (want) {
            if (done == 0) {
                return QIO_CHANNEL_ERR_BLOCK;
            } else {
                break;
            }
        }
    }
    return done;
}

/*
 * Block until all the outstanding chunks have been delivered by the hardware.
 */
static int qemu_rdma_drain_cq(QEMUFile *f, RDMAContext *rdma)
{
    int ret;

    if (qemu_rdma_write_flush(f, rdma) < 0) {
        return -EIO;
    }

    while (rdma->nb_sent) {
        ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
        if (ret < 0) {
            error_report("rdma migration: complete polling error!");
            return -EIO;
        }
    }

    qemu_rdma_unregister_waiting(rdma);

    return 0;
}

static int qio_channel_rdma_set_blocking(QIOChannel *ioc,
                                         bool blocking,
                                         Error **errp)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
    /* XXX we should make readv/writev actually honour this :-) */
    rioc->blocking = blocking;
    return 0;
}

typedef struct QIOChannelRDMASource QIOChannelRDMASource;
struct QIOChannelRDMASource {
    GSource parent;
    QIOChannelRDMA *rioc;
    GIOCondition condition;
};

static gboolean
qio_channel_rdma_source_prepare(GSource *source,
                                gint *timeout)
{
    QIOChannelRDMASource *rsource = (QIOChannelRDMASource *)source;
    RDMAContext *rdma;
    GIOCondition cond = 0;
    *timeout = -1;

    RCU_READ_LOCK_GUARD();
    if (rsource->condition == G_IO_IN) {
        rdma = qatomic_rcu_read(&rsource->rioc->rdmain);
    } else {
        rdma = qatomic_rcu_read(&rsource->rioc->rdmaout);
    }

    if (!rdma) {
        error_report("RDMAContext is NULL when prepare Gsource");
        return FALSE;
    }

    if (rdma->wr_data[0].control_len) {
        cond |= G_IO_IN;
    }
    cond |= G_IO_OUT;

    return cond & rsource->condition;
}

static gboolean
qio_channel_rdma_source_check(GSource *source)
{
    QIOChannelRDMASource *rsource = (QIOChannelRDMASource *)source;
    RDMAContext *rdma;
    GIOCondition cond = 0;

    RCU_READ_LOCK_GUARD();
    if (rsource->condition == G_IO_IN) {
        rdma = qatomic_rcu_read(&rsource->rioc->rdmain);
    } else {
        rdma = qatomic_rcu_read(&rsource->rioc->rdmaout);
    }

    if (!rdma) {
        error_report("RDMAContext is NULL when check Gsource");
        return FALSE;
    }

    if (rdma->wr_data[0].control_len) {
        cond |= G_IO_IN;
    }
    cond |= G_IO_OUT;

    return cond & rsource->condition;
}

static gboolean
qio_channel_rdma_source_dispatch(GSource *source,
                                 GSourceFunc callback,
                                 gpointer user_data)
{
    QIOChannelFunc func = (QIOChannelFunc)callback;
    QIOChannelRDMASource *rsource = (QIOChannelRDMASource *)source;
    RDMAContext *rdma;
    GIOCondition cond = 0;

    RCU_READ_LOCK_GUARD();
    if (rsource->condition == G_IO_IN) {
        rdma = qatomic_rcu_read(&rsource->rioc->rdmain);
    } else {
        rdma = qatomic_rcu_read(&rsource->rioc->rdmaout);
    }

    if (!rdma) {
        error_report("RDMAContext is NULL when dispatch Gsource");
        return FALSE;
    }

    if (rdma->wr_data[0].control_len) {
        cond |= G_IO_IN;
    }
    cond |= G_IO_OUT;

    return (*func)(QIO_CHANNEL(rsource->rioc),
                   (cond & rsource->condition),
                   user_data);
}

static void
qio_channel_rdma_source_finalize(GSource *source)
{
    QIOChannelRDMASource *ssource = (QIOChannelRDMASource *)source;

    object_unref(OBJECT(ssource->rioc));
}

GSourceFuncs qio_channel_rdma_source_funcs = {
    qio_channel_rdma_source_prepare,
    qio_channel_rdma_source_check,
    qio_channel_rdma_source_dispatch,
    qio_channel_rdma_source_finalize
};

static GSource *qio_channel_rdma_create_watch(QIOChannel *ioc,
                                              GIOCondition condition)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
    QIOChannelRDMASource *ssource;
    GSource *source;

    source = g_source_new(&qio_channel_rdma_source_funcs,
                          sizeof(QIOChannelRDMASource));
    ssource = (QIOChannelRDMASource *)source;

    ssource->rioc = rioc;
    object_ref(OBJECT(rioc));

    ssource->condition = condition;

    return source;
}
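
/*
 * The GSource above reports G_IO_IN only when a buffered control
 * message is pending (wr_data[0].control_len != 0) and always reports
 * G_IO_OUT. A typical caller attaches it with the usual glib pattern,
 * e.g. g_source_attach(qio_channel_rdma_create_watch(ioc, G_IO_IN), ctx).
 */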

static void qio_channel_rdma_set_aio_fd_handler(QIOChannel *ioc,
                                                AioContext *ctx,
                                                IOHandler *io_read,
                                                IOHandler *io_write,
                                                void *opaque)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
    if (io_read) {
        aio_set_fd_handler(ctx, rioc->rdmain->comp_channel->fd,
                           false, io_read, io_write, NULL, opaque);
    } else {
        aio_set_fd_handler(ctx, rioc->rdmaout->comp_channel->fd,
                           false, io_read, io_write, NULL, opaque);
    }
}

struct rdma_close_rcu {
    struct rcu_head rcu;
    RDMAContext *rdmain;
    RDMAContext *rdmaout;
};

/* callback from qio_channel_rdma_close via call_rcu */
static void qio_channel_rdma_close_rcu(struct rdma_close_rcu *rcu)
{
    if (rcu->rdmain) {
        qemu_rdma_cleanup(rcu->rdmain);
    }

    if (rcu->rdmaout) {
        qemu_rdma_cleanup(rcu->rdmaout);
    }

    g_free(rcu->rdmain);
    g_free(rcu->rdmaout);
    g_free(rcu);
}

static int qio_channel_rdma_close(QIOChannel *ioc,
                                  Error **errp)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
    RDMAContext *rdmain, *rdmaout;
    struct rdma_close_rcu *rcu = g_new(struct rdma_close_rcu, 1);

    trace_qemu_rdma_close();

    rdmain = rioc->rdmain;
    if (rdmain) {
        qatomic_rcu_set(&rioc->rdmain, NULL);
    }

    rdmaout = rioc->rdmaout;
    if (rdmaout) {
        qatomic_rcu_set(&rioc->rdmaout, NULL);
    }

    rcu->rdmain = rdmain;
    rcu->rdmaout = rdmaout;
    call_rcu(rcu, qio_channel_rdma_close_rcu, rcu);

    return 0;
}

static int
qio_channel_rdma_shutdown(QIOChannel *ioc,
                          QIOChannelShutdown how,
                          Error **errp)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
    RDMAContext *rdmain, *rdmaout;

    RCU_READ_LOCK_GUARD();

    rdmain = qatomic_rcu_read(&rioc->rdmain);
    rdmaout = qatomic_rcu_read(&rioc->rdmaout);

    switch (how) {
    case QIO_CHANNEL_SHUTDOWN_READ:
        if (rdmain) {
            rdmain->error_state = -1;
        }
        break;
    case QIO_CHANNEL_SHUTDOWN_WRITE:
        if (rdmaout) {
            rdmaout->error_state = -1;
        }
        break;
    case QIO_CHANNEL_SHUTDOWN_BOTH:
    default:
        if (rdmain) {
            rdmain->error_state = -1;
        }
        if (rdmaout) {
            rdmaout->error_state = -1;
        }
        break;
    }

    return 0;
}

/*
 * Parameters:
 *    @offset == 0 :
 *        This means that 'block_offset' is a full virtual address that does
 *        not belong to a RAMBlock of the virtual machine and instead
 *        represents a private malloc'd memory area that the caller wishes to
 *        transfer.
 *
 *    @offset != 0 :
 *        Offset is an offset to be added to block_offset and used
 *        to also lookup the corresponding RAMBlock.
 *
 *    @size > 0 :
 *        Initiate a transfer of this size.
 *
 *    @size == 0 :
 *        A 'hint' or 'advice' that means that we wish to speculatively
 *        and asynchronously unregister this memory. In this case, there is no
 *        guarantee that the unregister will actually happen, for example,
 *        if the memory is being actively transmitted. Additionally, the memory
 *        may be re-registered at any future time if a write within the same
 *        chunk was requested again, even if you attempted to unregister it
 *        here.
 *
 *    @size < 0 : TODO, not yet supported
 *        Unregister the memory NOW. This means that the caller does not
 *        expect there to be any future RDMA transfers and we just want to clean
 *        things up. This is used in case the upper layer owns the memory and
 *        cannot wait for qemu_fclose() to occur.
 *
 *    @bytes_sent : User-specified pointer to indicate how many bytes were
 *                  sent. Usually, this will not be more than a few bytes of
 *                  the protocol because most transfers are sent asynchronously.
 */
static size_t qemu_rdma_save_page(QEMUFile *f, void *opaque,
                                  ram_addr_t block_offset, ram_addr_t offset,
                                  size_t size, uint64_t *bytes_sent)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(opaque);
    RDMAContext *rdma;
    int ret;

    RCU_READ_LOCK_GUARD();
    rdma = qatomic_rcu_read(&rioc->rdmaout);

    if (!rdma) {
        return -EIO;
    }

    CHECK_ERROR_STATE();

    if (migration_in_postcopy()) {
        return RAM_SAVE_CONTROL_NOT_SUPP;
    }

    qemu_fflush(f);

    if (size > 0) {
        /*
         * Add this page to the current 'chunk'. If the chunk
         * is full, or the page doesn't belong to the current chunk,
         * an actual RDMA write will occur and a new chunk will be formed.
         */
        ret = qemu_rdma_write(f, rdma, block_offset, offset, size);
        if (ret < 0) {
            error_report("rdma migration: write error! %d", ret);
            goto err;
        }

        /*
         * We always return 1 bytes because the RDMA
         * protocol is completely asynchronous. We do not yet know
         * whether an identified chunk is zero or not because we're
         * waiting for other pages to potentially be merged with
         * the current chunk. So, we have to call qemu_update_position()
         * later on when the actual write occurs.
         */
        if (bytes_sent) {
            *bytes_sent = 1;
        }
    } else {
        uint64_t index, chunk;

        /* TODO: Change QEMUFileOps prototype to be signed: size_t => long
        if (size < 0) {
            ret = qemu_rdma_drain_cq(f, rdma);
            if (ret < 0) {
                fprintf(stderr, "rdma: failed to synchronously drain"
                                " completion queue before unregistration.\n");
                goto err;
            }
        }
        */

        ret = qemu_rdma_search_ram_block(rdma, block_offset,
                                         offset, size, &index, &chunk);

        if (ret) {
            error_report("ram block search failed");
            goto err;
        }

        qemu_rdma_signal_unregister(rdma, index, chunk, 0);

        /*
         * TODO: Synchronous, guaranteed unregistration (should not occur
         * during fast-path). Otherwise, unregisters will process on the next
         * call to qemu_rdma_drain_cq()
         */
        qemu_rdma_unregister_waiting(rdma);
    }

    /*
     * Drain the Completion Queue if possible, but do not block,
     * just poll.
     *
     * If nothing to poll, the end of the iteration will do this
     * again to make sure we don't overflow the request queue.
     */
    while (1) {
        uint64_t wr_id, wr_id_in;
        int ret = qemu_rdma_poll(rdma, &wr_id_in, NULL);
        if (ret < 0) {
            error_report("rdma migration: polling error! %d", ret);
            goto err;
        }

        wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;

        if (wr_id == RDMA_WRID_NONE) {
            break;
        }
    }

    return RAM_SAVE_CONTROL_DELAYED;
err:
    rdma->error_state = ret;
    return ret;
}
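
/*
 * Note: qemu_rdma_save_page() returning RAM_SAVE_CONTROL_DELAYED tells
 * the RAM layer that delivery is asynchronous; *bytes_sent is set to a
 * token value of 1 and the real byte accounting happens in
 * qemu_rdma_write_one() via acct_update_position() when the chunk is
 * actually posted.
 */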

static void rdma_accept_incoming_migration(void *opaque);

static void rdma_cm_poll_handler(void *opaque)
{
    RDMAContext *rdma = opaque;
    int ret;
    struct rdma_cm_event *cm_event;
    enum rdma_cm_event_type event;
    MigrationIncomingState *mis = migration_incoming_get_current();

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        error_report("get_cm_event failed %d", errno);
        return;
    }
    /* rdma_ack_cm_event() releases cm_event, so save the type first */
    event = cm_event->event;
    rdma_ack_cm_event(cm_event);

    if (event == RDMA_CM_EVENT_DISCONNECTED ||
        event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
        if (!rdma->error_state &&
            migration_incoming_get_current()->state !=
              MIGRATION_STATUS_COMPLETED) {
            error_report("receive cm event, cm event is %d", event);
            rdma->error_state = -EPIPE;
            if (rdma->return_path) {
                rdma->return_path->error_state = -EPIPE;
            }
        }

        if (mis->migration_incoming_co) {
            qemu_coroutine_enter(mis->migration_incoming_co);
        }
        return;
    }
}

static int qemu_rdma_accept(RDMAContext *rdma)
{
    RDMACapabilities cap;
    struct rdma_conn_param conn_param = {
                                            .responder_resources = 2,
                                            .private_data = &cap,
                                            .private_data_len = sizeof(cap),
                                         };
    struct rdma_cm_event *cm_event;
    struct ibv_context *verbs;
    int ret = -EINVAL;
    int idx;

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        goto err_rdma_dest_wait;
    }

    if (cm_event->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
        rdma_ack_cm_event(cm_event);
        goto err_rdma_dest_wait;
    }

    memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));

    network_to_caps(&cap);

    if (cap.version < 1 || cap.version > RDMA_CONTROL_VERSION_CURRENT) {
        error_report("Unknown source RDMA version: %d, bailing...",
                     cap.version);
        rdma_ack_cm_event(cm_event);
        goto err_rdma_dest_wait;
    }

    /*
     * Respond with only the capabilities this version of QEMU knows about.
     */
    cap.flags &= known_capabilities;

    /*
     * Enable the ones that we do know about.
     * Add other checks here as new ones are introduced.
     */
    if (cap.flags & RDMA_CAPABILITY_PIN_ALL) {
        rdma->pin_all = true;
    }

    rdma->cm_id = cm_event->id;
    verbs = cm_event->id->verbs;

    rdma_ack_cm_event(cm_event);

    trace_qemu_rdma_accept_pin_state(rdma->pin_all);

    caps_to_network(&cap);

    trace_qemu_rdma_accept_pin_verbsc(verbs);

    if (!rdma->verbs) {
        rdma->verbs = verbs;
    } else if (rdma->verbs != verbs) {
        error_report("ibv context not matching %p, %p!", rdma->verbs,
                     verbs);
        goto err_rdma_dest_wait;
    }

    qemu_rdma_dump_id("dest_init", verbs);

    ret = qemu_rdma_alloc_pd_cq(rdma);
    if (ret) {
        error_report("rdma migration: error allocating pd and cq!");
        goto err_rdma_dest_wait;
    }

    ret = qemu_rdma_alloc_qp(rdma);
    if (ret) {
        error_report("rdma migration: error allocating qp!");
        goto err_rdma_dest_wait;
    }

    ret = qemu_rdma_init_ram_blocks(rdma);
    if (ret) {
        error_report("rdma migration: error initializing ram blocks!");
        goto err_rdma_dest_wait;
    }

    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
        ret = qemu_rdma_reg_control(rdma, idx);
        if (ret) {
            error_report("rdma: error registering %d control", idx);
            goto err_rdma_dest_wait;
        }
    }

    /* Accept the second connection request for return path */
    if (migrate_postcopy() && !rdma->is_return_path) {
        qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration,
                            NULL,
                            (void *)(intptr_t)rdma->return_path);
    } else {
        qemu_set_fd_handler(rdma->channel->fd, rdma_cm_poll_handler,
                            NULL, rdma);
    }

    ret = rdma_accept(rdma->cm_id, &conn_param);
    if (ret) {
        error_report("rdma_accept returns %d", ret);
        goto err_rdma_dest_wait;
    }

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        error_report("rdma_accept get_cm_event failed %d", ret);
        goto err_rdma_dest_wait;
    }

    if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
        error_report("rdma_accept not event established");
        rdma_ack_cm_event(cm_event);
        goto err_rdma_dest_wait;
    }

    rdma_ack_cm_event(cm_event);
    rdma->connected = true;

    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
    if (ret) {
        error_report("rdma migration: error posting second control recv");
        goto err_rdma_dest_wait;
    }

    qemu_rdma_dump_gid("dest_connect", rdma->cm_id);

    return 0;

err_rdma_dest_wait:
    rdma->error_state = ret;
    qemu_rdma_cleanup(rdma);
    return ret;
}

static int dest_ram_sort_func(const void *a, const void *b)
{
    unsigned int a_index = ((const RDMALocalBlock *)a)->src_index;
    unsigned int b_index = ((const RDMALocalBlock *)b)->src_index;

    return (a_index < b_index) ? -1 : (a_index != b_index);
}
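
/*
 * dest_ram_sort_func() is a qsort() comparator on src_index: it returns
 * -1 when a < b, 0 when they are equal, and 1 when a > b, e.g.
 * src_index 2 vs 7 yields -1, so blocks end up in source order.
 */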

/*
 * During each iteration of the migration, we listen for instructions
 * by the source VM to perform dynamic page registrations before they
 * can perform RDMA operations.
 *
 * We respond with the 'rkey'.
 *
 * Keep doing this until the source tells us to stop.
 */
static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque)
{
    RDMAControlHeader reg_resp = { .len = sizeof(RDMARegisterResult),
                                   .type = RDMA_CONTROL_REGISTER_RESULT,
                                   .repeat = 0,
                                 };
    RDMAControlHeader unreg_resp = { .len = 0,
                                     .type = RDMA_CONTROL_UNREGISTER_FINISHED,
                                     .repeat = 0,
                                   };
    RDMAControlHeader blocks = { .type = RDMA_CONTROL_RAM_BLOCKS_RESULT,
                                 .repeat = 1 };
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(opaque);
    RDMAContext *rdma;
    RDMALocalBlocks *local;
    RDMAControlHeader head;
    RDMARegister *reg, *registers;
    RDMACompress *comp;
    RDMARegisterResult *reg_result;
    static RDMARegisterResult results[RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE];
    RDMALocalBlock *block;
    void *host_addr;
    int ret = 0;
    int idx = 0;
    int count = 0;
    int i = 0;

    RCU_READ_LOCK_GUARD();
    rdma = qatomic_rcu_read(&rioc->rdmain);

    if (!rdma) {
        return -EIO;
    }

    CHECK_ERROR_STATE();

    local = &rdma->local_ram_blocks;
    do {
        trace_qemu_rdma_registration_handle_wait();

        ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_NONE);

        if (ret < 0) {
            break;
        }

        if (head.repeat > RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE) {
            error_report("rdma: Too many requests in this message (%d). "
                         "Bailing.", head.repeat);
            ret = -EIO;
            break;
        }

        switch (head.type) {
        case RDMA_CONTROL_COMPRESS:
            comp = (RDMACompress *) rdma->wr_data[idx].control_curr;
            network_to_compress(comp);

            trace_qemu_rdma_registration_handle_compress(comp->length,
                                                         comp->block_idx,
                                                         comp->offset);
            if (comp->block_idx >= rdma->local_ram_blocks.nb_blocks) {
                error_report("rdma: 'compress' bad block index %u (vs %d)",
                             (unsigned int)comp->block_idx,
                             rdma->local_ram_blocks.nb_blocks);
                ret = -EIO;
                goto out;
            }
            block = &(rdma->local_ram_blocks.block[comp->block_idx]);

            host_addr = block->local_host_addr +
                            (comp->offset - block->offset);

            ram_handle_compressed(host_addr, comp->value, comp->length);
            break;

        case RDMA_CONTROL_REGISTER_FINISHED:
            trace_qemu_rdma_registration_handle_finished();
            goto out;

        case RDMA_CONTROL_RAM_BLOCKS_REQUEST:
            trace_qemu_rdma_registration_handle_ram_blocks();

            /* Sort our local RAM Block list so it's the same as the source,
             * we can do this since we've filled in a src_index in the list
             * as we received the RAMBlock list earlier.
             */
            qsort(rdma->local_ram_blocks.block,
                  rdma->local_ram_blocks.nb_blocks,
                  sizeof(RDMALocalBlock), dest_ram_sort_func);
            for (i = 0; i < local->nb_blocks; i++) {
                local->block[i].index = i;
            }

            if (rdma->pin_all) {
                ret = qemu_rdma_reg_whole_ram_blocks(rdma);
                if (ret) {
                    error_report("rdma migration: error dest "
                                 "registering ram blocks");
                    goto out;
                }
            }

            /*
             * Dest uses this to prepare to transmit the RAMBlock descriptions
             * to the source VM after connection setup.
             * Both sides use the "remote" structure to communicate and update
             * their "local" descriptions with what was sent.
             */
            for (i = 0; i < local->nb_blocks; i++) {
                rdma->dest_blocks[i].remote_host_addr =
                    (uintptr_t)(local->block[i].local_host_addr);

                if (rdma->pin_all) {
                    rdma->dest_blocks[i].remote_rkey = local->block[i].mr->rkey;
                }

                rdma->dest_blocks[i].offset = local->block[i].offset;
                rdma->dest_blocks[i].length = local->block[i].length;

                dest_block_to_network(&rdma->dest_blocks[i]);
                trace_qemu_rdma_registration_handle_ram_blocks_loop(
                    local->block[i].block_name,
                    local->block[i].offset,
                    local->block[i].length,
                    local->block[i].local_host_addr,
                    local->block[i].src_index);
            }

            blocks.len = rdma->local_ram_blocks.nb_blocks
                                                * sizeof(RDMADestBlock);

            ret = qemu_rdma_post_send_control(rdma,
                                    (uint8_t *) rdma->dest_blocks, &blocks);

            if (ret < 0) {
                error_report("rdma migration: error sending remote info");
                goto out;
            }

            break;
        case RDMA_CONTROL_REGISTER_REQUEST:
            trace_qemu_rdma_registration_handle_register(head.repeat);

            reg_resp.repeat = head.repeat;
            registers = (RDMARegister *) rdma->wr_data[idx].control_curr;

            for (count = 0; count < head.repeat; count++) {
                uint64_t chunk;
                uint8_t *chunk_start, *chunk_end;

                reg = &registers[count];
                network_to_register(reg);

                reg_result = &results[count];

                trace_qemu_rdma_registration_handle_register_loop(count,
                        reg->current_index, reg->key.current_addr,
                        reg->chunks);

                if (reg->current_index >= rdma->local_ram_blocks.nb_blocks) {
                    error_report("rdma: 'register' bad block index %u (vs %d)",
                                 (unsigned int)reg->current_index,
                                 rdma->local_ram_blocks.nb_blocks);
                    ret = -ENOENT;
                    goto out;
                }
                block = &(rdma->local_ram_blocks.block[reg->current_index]);
                if (block->is_ram_block) {
                    if (block->offset > reg->key.current_addr) {
                        error_report("rdma: bad register address for block %s"
                            " offset: %" PRIx64 " current_addr: %" PRIx64,
                            block->block_name, block->offset,
                            reg->key.current_addr);
                        ret = -ERANGE;
                        goto out;
                    }
                    host_addr = (block->local_host_addr +
                                (reg->key.current_addr - block->offset));
                    chunk = ram_chunk_index(block->local_host_addr,
                                            (uint8_t *) host_addr);
                } else {
                    chunk = reg->key.chunk;
                    host_addr = block->local_host_addr +
                        (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT));
                    /* Check for particularly bad chunk value */
                    if (host_addr < (void *)block->local_host_addr) {
                        error_report("rdma: bad chunk for block %s"
                            " chunk: %" PRIx64,
                            block->block_name, reg->key.chunk);
                        ret = -ERANGE;
                        goto out;
                    }
                }
                chunk_start = ram_chunk_start(block, chunk);
                chunk_end = ram_chunk_end(block, chunk + reg->chunks);
                /* avoid "-Waddress-of-packed-member" warning */
                uint32_t tmp_rkey = 0;
                if (qemu_rdma_register_and_get_keys(rdma, block,
                            (uintptr_t)host_addr, NULL, &tmp_rkey,
                            chunk, chunk_start, chunk_end)) {
                    error_report("cannot get rkey");
                    ret = -EINVAL;
                    goto out;
                }
                reg_result->rkey = tmp_rkey;

                reg_result->host_addr = (uintptr_t)block->local_host_addr;

                trace_qemu_rdma_registration_handle_register_rkey(
                                                        reg_result->rkey);

                result_to_network(reg_result);
            }

            ret = qemu_rdma_post_send_control(rdma,
                            (uint8_t *) results, &reg_resp);

            if (ret < 0) {
                error_report("Failed to send control buffer");
                goto out;
            }
            break;
        case RDMA_CONTROL_UNREGISTER_REQUEST:
            trace_qemu_rdma_registration_handle_unregister(head.repeat);
            unreg_resp.repeat = head.repeat;
            registers = (RDMARegister *) rdma->wr_data[idx].control_curr;

            for (count = 0; count < head.repeat; count++) {
                reg = &registers[count];
                network_to_register(reg);

                trace_qemu_rdma_registration_handle_unregister_loop(count,
                           reg->current_index, reg->key.chunk);

                block = &(rdma->local_ram_blocks.block[reg->current_index]);

                ret = ibv_dereg_mr(block->pmr[reg->key.chunk]);
                block->pmr[reg->key.chunk] = NULL;

                if (ret != 0) {
                    perror("rdma unregistration chunk failed");
                    ret = -ret;
                    goto out;
                }

                rdma->total_registrations--;

                trace_qemu_rdma_registration_handle_unregister_success(
                                                        reg->key.chunk);
            }

            ret = qemu_rdma_post_send_control(rdma, NULL, &unreg_resp);

            if (ret < 0) {
                error_report("Failed to send control buffer");
                goto out;
            }
            break;
        case RDMA_CONTROL_REGISTER_RESULT:
            error_report("Invalid RESULT message at dest.");
            ret = -EIO;
            goto out;
        default:
            error_report("Unknown control message %s", control_desc(head.type));
            ret = -EIO;
            goto out;
        }
    } while (1);
out:
    if (ret < 0) {
        rdma->error_state = ret;
    }
    return ret;
}
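
/*
 * Message flow handled above, as driven by the source each iteration:
 * COMPRESS (zero a chunk locally), RAM_BLOCKS_REQUEST (reply with the
 * sorted RDMADestBlock list), REGISTER_REQUEST (pin chunks and reply
 * with rkeys), UNREGISTER_REQUEST (dereg chunks and confirm), and
 * REGISTER_FINISHED, which ends the loop for this iteration.
 */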

/*
 * Called via a ram_control_load_hook during the initial RAM load section which
 * lists the RAMBlocks by name. This lets us know the order of the RAMBlocks
 * on the source.
 * We've already built our local RAMBlock list, but not yet sent the list to
 * the source.
 */
static int
rdma_block_notification_handle(QIOChannelRDMA *rioc, const char *name)
{
    RDMAContext *rdma;
    int curr;
    int found = -1;

    RCU_READ_LOCK_GUARD();
    rdma = qatomic_rcu_read(&rioc->rdmain);

    if (!rdma) {
        return -EIO;
    }

    /* Find the matching RAMBlock in our local list */
    for (curr = 0; curr < rdma->local_ram_blocks.nb_blocks; curr++) {
        if (!strcmp(rdma->local_ram_blocks.block[curr].block_name, name)) {
            found = curr;
            break;
        }
    }

    if (found == -1) {
        error_report("RAMBlock '%s' not found on destination", name);
        return -ENOENT;
    }

    rdma->local_ram_blocks.block[curr].src_index = rdma->next_src_index;
    trace_rdma_block_notification_handle(name, rdma->next_src_index);
    rdma->next_src_index++;

    return 0;
}

static int rdma_load_hook(QEMUFile *f, void *opaque, uint64_t flags, void *data)
{
    switch (flags) {
    case RAM_CONTROL_BLOCK_REG:
        return rdma_block_notification_handle(opaque, data);

    case RAM_CONTROL_HOOK:
        return qemu_rdma_registration_handle(f, opaque);

    default:
        /* Shouldn't be called with any other values */
        abort();
    }
}

static int qemu_rdma_registration_start(QEMUFile *f, void *opaque,
                                        uint64_t flags, void *data)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(opaque);
    RDMAContext *rdma;

    RCU_READ_LOCK_GUARD();
    rdma = qatomic_rcu_read(&rioc->rdmaout);
    if (!rdma) {
        return -EIO;
    }

    CHECK_ERROR_STATE();

    if (migration_in_postcopy()) {
        return 0;
    }

    trace_qemu_rdma_registration_start(flags);
    qemu_put_be64(f, RAM_SAVE_FLAG_HOOK);
    qemu_fflush(f);

    return 0;
}

/*
 * Inform dest that dynamic registrations are done for now.
 * First, flush writes, if any.
 */
static int qemu_rdma_registration_stop(QEMUFile *f, void *opaque,
                                       uint64_t flags, void *data)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(opaque);
    RDMAContext *rdma;
    RDMAControlHeader head = { .len = 0, .repeat = 1 };
    int ret = 0;

    RCU_READ_LOCK_GUARD();
    rdma = qatomic_rcu_read(&rioc->rdmaout);
    if (!rdma) {
        return -EIO;
    }

    CHECK_ERROR_STATE();

    if (migration_in_postcopy()) {
        return 0;
    }

    qemu_fflush(f);
    ret = qemu_rdma_drain_cq(f, rdma);

    if (ret < 0) {
        goto err;
    }

    if (flags == RAM_CONTROL_SETUP) {
        RDMAControlHeader resp = {.type = RDMA_CONTROL_RAM_BLOCKS_RESULT };
        RDMALocalBlocks *local = &rdma->local_ram_blocks;
        int reg_result_idx, i, nb_dest_blocks;

        head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST;
        trace_qemu_rdma_registration_stop_ram();

        /*
         * Make sure that we parallelize the pinning on both sides.
         * For very large guests, doing this serially takes a really
         * long time, so we have to 'interleave' the pinning locally
         * with the control messages by performing the pinning on this
         * side before we receive the control response from the other
         * side that the pinning has completed.
         */
        ret = qemu_rdma_exchange_send(rdma, &head, NULL, &resp,
                    &reg_result_idx, rdma->pin_all ?
                    qemu_rdma_reg_whole_ram_blocks : NULL);
        if (ret < 0) {
            fprintf(stderr, "receiving remote info!");
            return ret;
        }

        nb_dest_blocks = resp.len / sizeof(RDMADestBlock);

        /*
         * The protocol uses two different sets of rkeys (mutually exclusive):
         * 1. One key to represent the virtual address of the entire ram block.
         *    (dynamic chunk registration disabled - pin everything with one
         *     rkey.)
         * 2. One to represent individual chunks within a ram block.
         *    (dynamic chunk registration enabled - pin individual chunks.)
         *
         * Once the capability is successfully negotiated, the destination
         * transmits the keys to use (or sends them later) including the
         * virtual addresses and then propagates the remote ram block
         * descriptions to its local copy.
         */

        if (local->nb_blocks != nb_dest_blocks) {
            fprintf(stderr, "ram blocks mismatch (Number of blocks %d vs %d) "
                            "Your QEMU command line parameters are probably "
                            "not identical on both the source and destination.",
                    local->nb_blocks, nb_dest_blocks);
            rdma->error_state = -EINVAL;
            return -EINVAL;
        }

        qemu_rdma_move_header(rdma, reg_result_idx, &resp);
        memcpy(rdma->dest_blocks,
            rdma->wr_data[reg_result_idx].control_curr, resp.len);
        for (i = 0; i < nb_dest_blocks; i++) {
            network_to_dest_block(&rdma->dest_blocks[i]);

            /* We require that the blocks are in the same order */
            if (rdma->dest_blocks[i].length != local->block[i].length) {
                fprintf(stderr, "Block %s/%d has a different length %" PRIu64
                                " vs %" PRIu64, local->block[i].block_name, i,
                        local->block[i].length,
                        rdma->dest_blocks[i].length);
                rdma->error_state = -EINVAL;
                return -EINVAL;
            }
            local->block[i].remote_host_addr =
                    rdma->dest_blocks[i].remote_host_addr;
            local->block[i].remote_rkey = rdma->dest_blocks[i].remote_rkey;
        }
    }

    trace_qemu_rdma_registration_stop(flags);

    head.type = RDMA_CONTROL_REGISTER_FINISHED;
    ret = qemu_rdma_exchange_send(rdma, &head, NULL, NULL, NULL, NULL);

    if (ret < 0) {
        goto err;
    }

    return 0;
err:
    rdma->error_state = ret;
    return ret;
}

static const QEMUFileHooks rdma_read_hooks = {
    .hook_ram_load = rdma_load_hook,
};

static const QEMUFileHooks rdma_write_hooks = {
    .before_ram_iterate = qemu_rdma_registration_start,
    .after_ram_iterate = qemu_rdma_registration_stop,
    .save_page = qemu_rdma_save_page,
};

static void qio_channel_rdma_finalize(Object *obj)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(obj);
    if (rioc->rdmain) {
        qemu_rdma_cleanup(rioc->rdmain);
        g_free(rioc->rdmain);
        rioc->rdmain = NULL;
    }
    if (rioc->rdmaout) {
        qemu_rdma_cleanup(rioc->rdmaout);
        g_free(rioc->rdmaout);
        rioc->rdmaout = NULL;
    }
}

static void qio_channel_rdma_class_init(ObjectClass *klass,
                                        void *class_data G_GNUC_UNUSED)
{
    QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);

    ioc_klass->io_writev = qio_channel_rdma_writev;
    ioc_klass->io_readv = qio_channel_rdma_readv;
    ioc_klass->io_set_blocking = qio_channel_rdma_set_blocking;
    ioc_klass->io_close = qio_channel_rdma_close;
    ioc_klass->io_create_watch = qio_channel_rdma_create_watch;
    ioc_klass->io_set_aio_fd_handler = qio_channel_rdma_set_aio_fd_handler;
    ioc_klass->io_shutdown = qio_channel_rdma_shutdown;
}

static const TypeInfo qio_channel_rdma_info = {
    .parent = TYPE_QIO_CHANNEL,
    .name = TYPE_QIO_CHANNEL_RDMA,
    .instance_size = sizeof(QIOChannelRDMA),
    .instance_finalize = qio_channel_rdma_finalize,
    .class_init = qio_channel_rdma_class_init,
};

static void qio_channel_rdma_register_types(void)
{
    type_register_static(&qio_channel_rdma_info);
}

type_init(qio_channel_rdma_register_types);

static QEMUFile *qemu_fopen_rdma(RDMAContext *rdma, const char *mode)
{
    QIOChannelRDMA *rioc;

    if (qemu_file_mode_is_not_valid(mode)) {
        return NULL;
    }

    rioc = QIO_CHANNEL_RDMA(object_new(TYPE_QIO_CHANNEL_RDMA));

    if (mode[0] == 'w') {
        rioc->file = qemu_fopen_channel_output(QIO_CHANNEL(rioc));
        rioc->rdmaout = rdma;
        rioc->rdmain = rdma->return_path;
        qemu_file_set_hooks(rioc->file, &rdma_write_hooks);
    } else {
        rioc->file = qemu_fopen_channel_input(QIO_CHANNEL(rioc));
        rioc->rdmain = rdma;
        rioc->rdmaout = rdma->return_path;
        qemu_file_set_hooks(rioc->file, &rdma_read_hooks);
    }

    return rioc->file;
}
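
/*
 * Usage below: the outgoing side opens the channel with
 * qemu_fopen_rdma(rdma, "wb") and gets the write hooks (registration
 * start/stop, save_page), while the incoming side opens "rb" and gets
 * the load hook; the opposite direction of each pair is wired to the
 * postcopy return path, when one exists.
 */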

static void rdma_accept_incoming_migration(void *opaque)
{
    RDMAContext *rdma = opaque;
    int ret;
    QEMUFile *f;
    Error *local_err = NULL;

    trace_qemu_rdma_accept_incoming_migration();
    ret = qemu_rdma_accept(rdma);

    if (ret) {
        fprintf(stderr, "RDMA ERROR: Migration initialization failed\n");
        return;
    }

    trace_qemu_rdma_accept_incoming_migration_accepted();

    if (rdma->is_return_path) {
        return;
    }

    f = qemu_fopen_rdma(rdma, "rb");
    if (f == NULL) {
        fprintf(stderr, "RDMA ERROR: could not qemu_fopen_rdma\n");
        qemu_rdma_cleanup(rdma);
        return;
    }

    rdma->migration_started_on_destination = 1;
    migration_fd_process_incoming(f, &local_err);
    if (local_err) {
        error_reportf_err(local_err, "RDMA ERROR:");
    }
}

void rdma_start_incoming_migration(const char *host_port, Error **errp)
{
    int ret;
    RDMAContext *rdma, *rdma_return_path = NULL;
    Error *local_err = NULL;

    trace_rdma_start_incoming_migration();

    /* Avoid ram_block_discard_disable(), cannot change during migration. */
    if (ram_block_discard_is_required()) {
        error_setg(errp, "RDMA: cannot disable RAM discard");
        return;
    }

    rdma = qemu_rdma_data_init(host_port, &local_err);
    if (rdma == NULL) {
        goto err;
    }

    ret = qemu_rdma_dest_init(rdma, &local_err);

    if (ret) {
        goto err;
    }

    trace_rdma_start_incoming_migration_after_dest_init();

    ret = rdma_listen(rdma->listen_id, 5);

    if (ret) {
        ERROR(errp, "listening on socket!");
        goto cleanup_rdma;
    }

    trace_rdma_start_incoming_migration_after_rdma_listen();

    /* initialize the RDMAContext for return path */
    if (migrate_postcopy()) {
        rdma_return_path = qemu_rdma_data_init(host_port, &local_err);

        if (rdma_return_path == NULL) {
            goto cleanup_rdma;
        }

        qemu_rdma_return_path_dest_init(rdma_return_path, rdma);
    }

    qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration,
                        NULL, (void *)(intptr_t)rdma);
    return;

cleanup_rdma:
    qemu_rdma_cleanup(rdma);
err:
    error_propagate(errp, local_err);
    if (rdma) {
        g_free(rdma->host);
    }
    g_free(rdma);
    g_free(rdma_return_path);
}

void rdma_start_outgoing_migration(void *opaque,
                            const char *host_port, Error **errp)
{
    MigrationState *s = opaque;
    RDMAContext *rdma_return_path = NULL;
    RDMAContext *rdma;
    int ret = 0;

    /* Avoid ram_block_discard_disable(), cannot change during migration. */
    if (ram_block_discard_is_required()) {
        error_setg(errp, "RDMA: cannot disable RAM discard");
        return;
    }

    rdma = qemu_rdma_data_init(host_port, errp);
    if (rdma == NULL) {
        goto err;
    }

    ret = qemu_rdma_source_init(rdma,
        s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL], errp);

    if (ret) {
        goto err;
    }

    trace_rdma_start_outgoing_migration_after_rdma_source_init();
    ret = qemu_rdma_connect(rdma, errp);

    if (ret) {
        goto err;
    }

    /* RDMA postcopy need a separate queue pair for return path */
    if (migrate_postcopy()) {
        rdma_return_path = qemu_rdma_data_init(host_port, errp);

        if (rdma_return_path == NULL) {
            goto return_path_err;
        }

        ret = qemu_rdma_source_init(rdma_return_path,
            s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL], errp);

        if (ret) {
            goto return_path_err;
        }

        ret = qemu_rdma_connect(rdma_return_path, errp);

        if (ret) {
            goto return_path_err;
        }

        rdma->return_path = rdma_return_path;
        rdma_return_path->return_path = rdma;
        rdma_return_path->is_return_path = true;
    }

    trace_rdma_start_outgoing_migration_after_rdma_connect();

    s->to_dst_file = qemu_fopen_rdma(rdma, "wb");
    migrate_fd_connect(s, NULL);
    return;
return_path_err:
    qemu_rdma_cleanup(rdma);
err:
    g_free(rdma);
    g_free(rdma_return_path);
}