/*
 * RDMA protocol and interfaces
 *
 * Copyright IBM, Corp. 2010-2013
 * Copyright Red Hat, Inc. 2015-2016
 *
 * Authors:
 *  Michael R. Hines <mrhines@us.ibm.com>
 *  Jiuxing Liu <jl@us.ibm.com>
 *  Daniel P. Berrange <berrange@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/cutils.h"
#include "exec/target_page.h"
#include "rdma.h"
#include "migration.h"
#include "migration-stats.h"
#include "qemu-file.h"
#include "ram.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/sockets.h"
#include "qemu/bitmap.h"
#include "qemu/coroutine.h"
#include "exec/memory.h"
#include <sys/socket.h>
#include <arpa/inet.h>
#include <rdma/rdma_cma.h>
#include "trace.h"
#include "qom/object.h"
/*
 * Print and error on both the Monitor and the Log file.
 */
#define ERROR(errp, fmt, ...) \
    do { \
        fprintf(stderr, "RDMA ERROR: " fmt "\n", ## __VA_ARGS__); \
        if (errp && (*(errp) == NULL)) { \
            error_setg(errp, "RDMA ERROR: " fmt, ## __VA_ARGS__); \
        } \
    } while (0)
#define RDMA_RESOLVE_TIMEOUT_MS 10000

/* Do not merge data if larger than this. */
#define RDMA_MERGE_MAX (2 * 1024 * 1024)
#define RDMA_SIGNALED_SEND_MAX (RDMA_MERGE_MAX / 4096)
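/*
 * With the values above, RDMA_SIGNALED_SEND_MAX works out to
 * (2 * 1024 * 1024) / 4096 = 512, i.e. one signaled send slot per
 * 4 KiB page of a maximally merged buffer.
 */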
#define RDMA_REG_CHUNK_SHIFT 20 /* 1 MB */

/*
 * This is only for non-live state being migrated.
 * Instead of RDMA_WRITE messages, we use RDMA_SEND
 * messages for that state, which requires a different
 * delivery design than main memory.
 */
#define RDMA_SEND_INCREMENT 32768

/*
 * Maximum size infiniband SEND message
 */
#define RDMA_CONTROL_MAX_BUFFER (512 * 1024)
#define RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE 4096

#define RDMA_CONTROL_VERSION_CURRENT 1
/*
 * Capabilities for negotiation.
 */
#define RDMA_CAPABILITY_PIN_ALL 0x01

/*
 * Add the other flags above to this list of known capabilities
 * as they are introduced.
 */
static uint32_t known_capabilities = RDMA_CAPABILITY_PIN_ALL;
#define CHECK_ERROR_STATE() \
    do { \
        if (rdma->error_state) { \
            if (!rdma->error_reported) { \
                error_report("RDMA is in an error state waiting migration" \
                             " to abort!"); \
                rdma->error_reported = true; \
            } \
            return rdma->error_state; \
        } \
    } while (0)
/*
 * A work request ID is 64-bits and we split up these bits
 * into 3 parts:
 *
 * bits 0-15 : type of control message, 2^16
 * bits 16-29: ram block index, 2^14
 * bits 30-63: ram block chunk number, 2^34
 *
 * The last two bit ranges are only used for RDMA writes,
 * in order to track their completion and potentially
 * also track unregistration status of the message.
 */
#define RDMA_WRID_TYPE_SHIFT  0UL
#define RDMA_WRID_BLOCK_SHIFT 16UL
#define RDMA_WRID_CHUNK_SHIFT 30UL

#define RDMA_WRID_TYPE_MASK \
    ((1UL << RDMA_WRID_BLOCK_SHIFT) - 1UL)

#define RDMA_WRID_BLOCK_MASK \
    (~RDMA_WRID_TYPE_MASK & ((1UL << RDMA_WRID_CHUNK_SHIFT) - 1UL))

#define RDMA_WRID_CHUNK_MASK (~RDMA_WRID_BLOCK_MASK & ~RDMA_WRID_TYPE_MASK)
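/*
 * For example, an RDMA write to chunk 5 of ram block 2 is encoded as
 * wr_id = RDMA_WRID_RDMA_WRITE | (2 << RDMA_WRID_BLOCK_SHIFT) |
 * (5 << RDMA_WRID_CHUNK_SHIFT).  Masking with RDMA_WRID_TYPE_MASK,
 * RDMA_WRID_BLOCK_MASK and RDMA_WRID_CHUNK_MASK (and shifting back)
 * recovers the three fields, which is exactly what the completion
 * handling below does.
 */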
/*
 * RDMA migration protocol:
 * 1. RDMA Writes (data messages, i.e. RAM)
 * 2. IB Send/Recv (control channel messages)
 */
enum {
    RDMA_WRID_NONE = 0,
    RDMA_WRID_RDMA_WRITE = 1,
    RDMA_WRID_SEND_CONTROL = 2000,
    RDMA_WRID_RECV_CONTROL = 4000,
};
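/*
 * The control wrid values leave room between them on purpose: a RECV
 * work request for control slot 'idx' is posted with wr_id
 * RDMA_WRID_RECV_CONTROL + idx (see qemu_rdma_post_recv_control()
 * below), so every wrid at or above 4000 identifies a control RECV.
 */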
/*
 * Work request IDs for IB SEND messages only (not RDMA writes).
 * This is used by the migration protocol to transmit
 * control messages (such as device state and registration commands)
 *
 * We could use more WRs, but we have enough for now.
 */
enum {
    RDMA_WRID_READY = 0,
    RDMA_WRID_DATA,
    RDMA_WRID_CONTROL,
    RDMA_WRID_MAX,
};
/*
 * SEND/RECV IB Control Messages.
 */
enum {
    RDMA_CONTROL_NONE = 0,
    RDMA_CONTROL_ERROR,
    RDMA_CONTROL_READY,               /* ready to receive */
    RDMA_CONTROL_QEMU_FILE,           /* QEMUFile-transmitted bytes */
    RDMA_CONTROL_RAM_BLOCKS_REQUEST,  /* RAMBlock synchronization */
    RDMA_CONTROL_RAM_BLOCKS_RESULT,   /* RAMBlock synchronization */
    RDMA_CONTROL_COMPRESS,            /* page contains repeat values */
    RDMA_CONTROL_REGISTER_REQUEST,    /* dynamic page registration */
    RDMA_CONTROL_REGISTER_RESULT,     /* key to use after registration */
    RDMA_CONTROL_REGISTER_FINISHED,   /* current iteration finished */
    RDMA_CONTROL_UNREGISTER_REQUEST,  /* dynamic UN-registration */
    RDMA_CONTROL_UNREGISTER_FINISHED, /* unpinning finished */
};
/*
 * Memory and MR structures used to represent an IB Send/Recv work request.
 * This is *not* used for RDMA writes, only IB Send/Recv.
 */
typedef struct {
    uint8_t  control[RDMA_CONTROL_MAX_BUFFER]; /* actual buffer to register */
    struct   ibv_mr *control_mr;               /* registration metadata */
    size_t   control_len;                      /* length of the message */
    uint8_t *control_curr;                     /* start of unconsumed bytes */
} RDMAWorkRequestData;
/*
 * Negotiate RDMA capabilities during connection-setup time.
 */
typedef struct {
    uint32_t version;
    uint32_t flags;
} RDMACapabilities;

static void caps_to_network(RDMACapabilities *cap)
{
    cap->version = htonl(cap->version);
    cap->flags = htonl(cap->flags);
}

static void network_to_caps(RDMACapabilities *cap)
{
    cap->version = ntohl(cap->version);
    cap->flags = ntohl(cap->flags);
}
/*
 * Representation of a RAMBlock from an RDMA perspective.
 * This is not transmitted, only local.
 * This and subsequent structures cannot be linked lists
 * because we're using a single IB message to transmit
 * the information. It's small anyway, so a list is overkill.
 */
typedef struct RDMALocalBlock {
    char          *block_name;
    uint8_t       *local_host_addr;  /* local virtual address */
    uint64_t       remote_host_addr; /* remote virtual address */
    uint64_t       offset;
    uint64_t       length;
    struct         ibv_mr **pmr;     /* MRs for chunk-level registration */
    struct         ibv_mr *mr;       /* MR for non-chunk-level registration */
    uint32_t      *remote_keys;      /* rkeys for chunk-level registration */
    uint32_t       remote_rkey;      /* rkeys for non-chunk-level registration */
    int            index;            /* which block are we */
    unsigned int   src_index;        /* (Only used on dest) */
    bool           is_ram_block;
    int            nb_chunks;
    unsigned long *transit_bitmap;
    unsigned long *unregister_bitmap;
} RDMALocalBlock;
/*
 * Also represents a RAMblock, but only on the dest.
 * This gets transmitted by the dest during connection-time
 * to the source VM and then is used to populate the
 * corresponding RDMALocalBlock with
 * the information needed to perform the actual RDMA.
 */
typedef struct QEMU_PACKED RDMADestBlock {
    uint64_t remote_host_addr;
    uint64_t offset;
    uint64_t length;
    uint32_t remote_rkey;
    uint32_t padding;
} RDMADestBlock;
static const char *control_desc(unsigned int rdma_control)
{
    static const char *strs[] = {
        [RDMA_CONTROL_NONE] = "NONE",
        [RDMA_CONTROL_ERROR] = "ERROR",
        [RDMA_CONTROL_READY] = "READY",
        [RDMA_CONTROL_QEMU_FILE] = "QEMU FILE",
        [RDMA_CONTROL_RAM_BLOCKS_REQUEST] = "RAM BLOCKS REQUEST",
        [RDMA_CONTROL_RAM_BLOCKS_RESULT] = "RAM BLOCKS RESULT",
        [RDMA_CONTROL_COMPRESS] = "COMPRESS",
        [RDMA_CONTROL_REGISTER_REQUEST] = "REGISTER REQUEST",
        [RDMA_CONTROL_REGISTER_RESULT] = "REGISTER RESULT",
        [RDMA_CONTROL_REGISTER_FINISHED] = "REGISTER FINISHED",
        [RDMA_CONTROL_UNREGISTER_REQUEST] = "UNREGISTER REQUEST",
        [RDMA_CONTROL_UNREGISTER_FINISHED] = "UNREGISTER FINISHED",
    };

    if (rdma_control > RDMA_CONTROL_UNREGISTER_FINISHED) {
        return "??BAD CONTROL VALUE??";
    }

    return strs[rdma_control];
}
static uint64_t htonll(uint64_t v)
{
    union { uint32_t lv[2]; uint64_t llv; } u;
    u.lv[0] = htonl(v >> 32);
    u.lv[1] = htonl(v & 0xFFFFFFFFULL);
    return u.llv;
}

static uint64_t ntohll(uint64_t v)
{
    union { uint32_t lv[2]; uint64_t llv; } u;
    u.llv = v;
    return ((uint64_t)ntohl(u.lv[0]) << 32) | (uint64_t) ntohl(u.lv[1]);
}
static void dest_block_to_network(RDMADestBlock *db)
{
    db->remote_host_addr = htonll(db->remote_host_addr);
    db->offset = htonll(db->offset);
    db->length = htonll(db->length);
    db->remote_rkey = htonl(db->remote_rkey);
}

static void network_to_dest_block(RDMADestBlock *db)
{
    db->remote_host_addr = ntohll(db->remote_host_addr);
    db->offset = ntohll(db->offset);
    db->length = ntohll(db->length);
    db->remote_rkey = ntohl(db->remote_rkey);
}
/*
 * Virtual address of the above structures used for transmitting
 * the RAMBlock descriptions at connection-time.
 * This structure is *not* transmitted.
 */
typedef struct RDMALocalBlocks {
    int nb_blocks;
    bool     init;             /* main memory init complete */
    RDMALocalBlock *block;
} RDMALocalBlocks;
/*
 * Main data structure for RDMA state.
 * While there is only one copy of this structure being allocated right now,
 * this is the place where one would start if you wanted to consider
 * having more than one RDMA connection open at the same time.
 */
typedef struct RDMAContext {
    char *host;
    int port;

    RDMAWorkRequestData wr_data[RDMA_WRID_MAX];

    /*
     * This is used by *_exchange_send() to figure out whether or not
     * the initial "READY" message has already been received or not.
     * This is because other functions may potentially poll() and detect
     * the READY message before send() does, in which case we need to
     * know if it completed.
     */
    int control_ready_expected;

    /* number of outstanding writes */
    int nb_sent;

    /* store info about current buffer so that we can
       merge it with future sends */
    uint64_t current_addr;
    uint64_t current_length;
    /* index of ram block the current buffer belongs to */
    int current_index;
    /* index of the chunk in the current ram block */
    int current_chunk;

    bool pin_all;

    /*
     * infiniband-specific variables for opening the device
     * and maintaining connection state and so forth.
     *
     * cm_id also has ibv_context, rdma_event_channel, and ibv_qp in
     * cm_id->verbs, cm_id->channel, and cm_id->qp.
     */
    struct rdma_cm_id *cm_id;               /* connection manager ID */
    struct rdma_cm_id *listen_id;

    struct ibv_context          *verbs;
    struct rdma_event_channel   *channel;
    struct ibv_qp *qp;                      /* queue pair */
    struct ibv_comp_channel *recv_comp_channel;  /* recv completion channel */
    struct ibv_comp_channel *send_comp_channel;  /* send completion channel */
    struct ibv_pd *pd;                      /* protection domain */
    struct ibv_cq *recv_cq;                 /* receive completion queue */
    struct ibv_cq *send_cq;                 /* send completion queue */

    /*
     * If a previous write failed (perhaps because of a failed
     * memory registration), then do not attempt any future work
     * and remember the error state.
     */
    int error_state;
    bool error_reported;
    bool received_error;

    /*
     * Description of ram blocks used throughout the code.
     */
    RDMALocalBlocks local_ram_blocks;
    RDMADestBlock  *dest_blocks;

    /* Index of the next RAMBlock received during block registration */
    unsigned int    next_src_index;

    /*
     * Migration on *destination* started.
     * Then use coroutine yield function.
     * Source runs in a thread, so we don't care.
     */
    int migration_started_on_destination;

    int total_registrations;
    int total_writes;

    int unregister_current, unregister_next;
    uint64_t unregistrations[RDMA_SIGNALED_SEND_MAX];

    GHashTable *blockmap;

    /* the RDMAContext for return path */
    struct RDMAContext *return_path;
} RDMAContext;
#define TYPE_QIO_CHANNEL_RDMA "qio-channel-rdma"
OBJECT_DECLARE_SIMPLE_TYPE(QIOChannelRDMA, QIO_CHANNEL_RDMA)

struct QIOChannelRDMA {
    QIOChannel parent;
    RDMAContext *rdmain;
    RDMAContext *rdmaout;
    QEMUFile *file;
    bool blocking; /* XXX we don't actually honour this yet */
};
/*
 * Main structure for IB Send/Recv control messages.
 * This gets prepended at the beginning of every Send/Recv.
 */
typedef struct QEMU_PACKED {
    uint32_t len;     /* Total length of data portion */
    uint32_t type;    /* which control command to perform */
    uint32_t repeat;  /* number of commands in data portion of same type */
    uint32_t padding;
} RDMAControlHeader;
static void control_to_network(RDMAControlHeader *control)
{
    control->type = htonl(control->type);
    control->len = htonl(control->len);
    control->repeat = htonl(control->repeat);
}

static void network_to_control(RDMAControlHeader *control)
{
    control->type = ntohl(control->type);
    control->len = ntohl(control->len);
    control->repeat = ntohl(control->repeat);
}
/*
 * Register a single Chunk.
 * Information sent by the source VM to inform the dest
 * to register a single chunk of memory before we can perform
 * the actual RDMA operation.
 */
typedef struct QEMU_PACKED {
    union QEMU_PACKED {
        uint64_t current_addr;  /* offset into the ram_addr_t space */
        uint64_t chunk;         /* chunk to lookup if unregistering */
    } key;
    uint32_t current_index; /* which ramblock the chunk belongs to */
    uint32_t padding;
    uint64_t chunks;        /* how many sequential chunks to register */
} RDMARegister;
static void register_to_network(RDMAContext *rdma, RDMARegister *reg)
{
    RDMALocalBlock *local_block;
    local_block  = &rdma->local_ram_blocks.block[reg->current_index];

    if (local_block->is_ram_block) {
        /*
         * current_addr as passed in is an address in the local ram_addr_t
         * space, we need to translate this for the destination
         */
        reg->key.current_addr -= local_block->offset;
        reg->key.current_addr += rdma->dest_blocks[reg->current_index].offset;
    }
    reg->key.current_addr = htonll(reg->key.current_addr);
    reg->current_index = htonl(reg->current_index);
    reg->chunks = htonll(reg->chunks);
}

static void network_to_register(RDMARegister *reg)
{
    reg->key.current_addr = ntohll(reg->key.current_addr);
    reg->current_index = ntohl(reg->current_index);
    reg->chunks = ntohll(reg->chunks);
}
typedef struct QEMU_PACKED {
    uint32_t value;     /* if zero, we will madvise() */
    uint32_t block_idx; /* which ram block index */
    uint64_t offset;    /* Address in remote ram_addr_t space */
    uint64_t length;    /* length of the chunk */
} RDMACompress;
static void compress_to_network(RDMAContext *rdma, RDMACompress *comp)
{
    comp->value = htonl(comp->value);
    /*
     * comp->offset as passed in is an address in the local ram_addr_t
     * space, we need to translate this for the destination
     */
    comp->offset -= rdma->local_ram_blocks.block[comp->block_idx].offset;
    comp->offset += rdma->dest_blocks[comp->block_idx].offset;
    comp->block_idx = htonl(comp->block_idx);
    comp->offset = htonll(comp->offset);
    comp->length = htonll(comp->length);
}
static void network_to_compress(RDMACompress *comp)
{
    comp->value = ntohl(comp->value);
    comp->block_idx = ntohl(comp->block_idx);
    comp->offset = ntohll(comp->offset);
    comp->length = ntohll(comp->length);
}
/*
 * The result of the dest's memory registration produces an "rkey"
 * which the source VM must reference in order to perform
 * the RDMA operation.
 */
typedef struct QEMU_PACKED {
    uint32_t rkey;
    uint32_t padding;
    uint64_t host_addr;
} RDMARegisterResult;

static void result_to_network(RDMARegisterResult *result)
{
    result->rkey = htonl(result->rkey);
    result->host_addr = htonll(result->host_addr);
}

static void network_to_result(RDMARegisterResult *result)
{
    result->rkey = ntohl(result->rkey);
    result->host_addr = ntohll(result->host_addr);
}
static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
                                   uint8_t *data, RDMAControlHeader *resp,
                                   int *resp_idx,
                                   int (*callback)(RDMAContext *rdma));
static inline uint64_t ram_chunk_index(const uint8_t *start,
                                       const uint8_t *host)
{
    return ((uintptr_t) host - (uintptr_t) start) >> RDMA_REG_CHUNK_SHIFT;
}
static inline uint8_t *ram_chunk_start(const RDMALocalBlock *rdma_ram_block,
                                       uint64_t i)
{
    return (uint8_t *)(uintptr_t)(rdma_ram_block->local_host_addr +
                                  (i << RDMA_REG_CHUNK_SHIFT));
}
static inline uint8_t *ram_chunk_end(const RDMALocalBlock *rdma_ram_block,
                                     uint64_t i)
{
    uint8_t *result = ram_chunk_start(rdma_ram_block, i) +
                                      (1UL << RDMA_REG_CHUNK_SHIFT);

    if (result > (rdma_ram_block->local_host_addr + rdma_ram_block->length)) {
        result = rdma_ram_block->local_host_addr + rdma_ram_block->length;
    }

    return result;
}
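/*
 * A quick worked example of the chunk math, using the 1 MB chunk size
 * set by RDMA_REG_CHUNK_SHIFT above: for a host pointer 3.5 MB
 * (0x380000 bytes) into a block, ram_chunk_index() yields
 * (0x380000 >> 20) = 3, ram_chunk_start() maps index 3 back to
 * local_host_addr + 0x300000, and ram_chunk_end() returns
 * local_host_addr + 0x400000 unless the block ends first, in which
 * case it is clamped to the block's end.
 */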
static void rdma_add_block(RDMAContext *rdma, const char *block_name,
                           void *host_addr,
                           ram_addr_t block_offset, uint64_t length)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;
    RDMALocalBlock *block;
    RDMALocalBlock *old = local->block;

    local->block = g_new0(RDMALocalBlock, local->nb_blocks + 1);

    if (local->nb_blocks) {
        int x;

        if (rdma->blockmap) {
            for (x = 0; x < local->nb_blocks; x++) {
                g_hash_table_remove(rdma->blockmap,
                                    (void *)(uintptr_t)old[x].offset);
                g_hash_table_insert(rdma->blockmap,
                                    (void *)(uintptr_t)old[x].offset,
                                    &local->block[x]);
            }
        }
        memcpy(local->block, old, sizeof(RDMALocalBlock) * local->nb_blocks);
        g_free(old);
    }

    block = &local->block[local->nb_blocks];

    block->block_name = g_strdup(block_name);
    block->local_host_addr = host_addr;
    block->offset = block_offset;
    block->length = length;
    block->index = local->nb_blocks;
    block->src_index = ~0U; /* Filled in by the receipt of the block list */
    block->nb_chunks = ram_chunk_index(host_addr, host_addr + length) + 1UL;
    block->transit_bitmap = bitmap_new(block->nb_chunks);
    bitmap_clear(block->transit_bitmap, 0, block->nb_chunks);
    block->unregister_bitmap = bitmap_new(block->nb_chunks);
    bitmap_clear(block->unregister_bitmap, 0, block->nb_chunks);
    block->remote_keys = g_new0(uint32_t, block->nb_chunks);

    block->is_ram_block = local->init ? false : true;

    if (rdma->blockmap) {
        g_hash_table_insert(rdma->blockmap,
                            (void *)(uintptr_t)block_offset, block);
    }

    trace_rdma_add_block(block_name, local->nb_blocks,
                         (uintptr_t) block->local_host_addr,
                         block->offset, block->length,
                         (uintptr_t) (block->local_host_addr + block->length),
                         BITS_TO_LONGS(block->nb_chunks) *
                             sizeof(unsigned long) * 8,
                         block->nb_chunks);

    local->nb_blocks++;
}
/*
 * Memory regions need to be registered with the device and queue pairs set up
 * in advance before the migration starts. This tells us where the RAM blocks
 * are so that we can register them individually.
 */
static int qemu_rdma_init_one_block(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t block_offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    rdma_add_block(opaque, block_name, host_addr, block_offset, length);
    return 0;
}
/*
 * Identify the RAMBlocks and their quantity. They will be used as
 * references to identify chunk boundaries inside each RAMBlock and also be
 * referenced during dynamic page registration.
 */
static void qemu_rdma_init_ram_blocks(RDMAContext *rdma)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;
    int ret;

    assert(rdma->blockmap == NULL);
    memset(local, 0, sizeof *local);
    ret = foreach_not_ignored_block(qemu_rdma_init_one_block, rdma);
    assert(!ret);
    trace_qemu_rdma_init_ram_blocks(local->nb_blocks);
    rdma->dest_blocks = g_new0(RDMADestBlock,
                               rdma->local_ram_blocks.nb_blocks);
    local->init = true;
}
/*
 * Note: If used outside of cleanup, the caller must ensure that the destination
 * block structures are also updated
 */
static void rdma_delete_block(RDMAContext *rdma, RDMALocalBlock *block)
{
    RDMALocalBlocks *local = &rdma->local_ram_blocks;
    RDMALocalBlock *old = local->block;
    int x;

    if (rdma->blockmap) {
        g_hash_table_remove(rdma->blockmap, (void *)(uintptr_t)block->offset);
    }
    if (block->pmr) {
        int j;

        for (j = 0; j < block->nb_chunks; j++) {
            if (!block->pmr[j]) {
                continue;
            }
            ibv_dereg_mr(block->pmr[j]);
            rdma->total_registrations--;
        }
        g_free(block->pmr);
        block->pmr = NULL;
    }

    if (block->mr) {
        ibv_dereg_mr(block->mr);
        rdma->total_registrations--;
        block->mr = NULL;
    }

    g_free(block->transit_bitmap);
    block->transit_bitmap = NULL;

    g_free(block->unregister_bitmap);
    block->unregister_bitmap = NULL;

    g_free(block->remote_keys);
    block->remote_keys = NULL;

    g_free(block->block_name);
    block->block_name = NULL;

    if (rdma->blockmap) {
        for (x = 0; x < local->nb_blocks; x++) {
            g_hash_table_remove(rdma->blockmap,
                                (void *)(uintptr_t)old[x].offset);
        }
    }

    if (local->nb_blocks > 1) {

        local->block = g_new0(RDMALocalBlock, local->nb_blocks - 1);

        if (block->index) {
            memcpy(local->block, old, sizeof(RDMALocalBlock) * block->index);
        }

        if (block->index < (local->nb_blocks - 1)) {
            memcpy(local->block + block->index, old + (block->index + 1),
                   sizeof(RDMALocalBlock) *
                       (local->nb_blocks - (block->index + 1)));
            for (x = block->index; x < local->nb_blocks - 1; x++) {
                local->block[x].index--;
            }
        }
    } else {
        assert(block == local->block);
        local->block = NULL;
    }

    trace_rdma_delete_block(block, (uintptr_t)block->local_host_addr,
                            block->offset, block->length,
                            (uintptr_t)(block->local_host_addr + block->length),
                            BITS_TO_LONGS(block->nb_chunks) *
                                sizeof(unsigned long) * 8, block->nb_chunks);

    g_free(old);

    local->nb_blocks--;

    if (local->nb_blocks && rdma->blockmap) {
        for (x = 0; x < local->nb_blocks; x++) {
            g_hash_table_insert(rdma->blockmap,
                                (void *)(uintptr_t)local->block[x].offset,
                                &local->block[x]);
        }
    }
}
/*
 * Put in the log file which RDMA device was opened and the details
 * associated with that device.
 */
static void qemu_rdma_dump_id(const char *who, struct ibv_context *verbs)
{
    struct ibv_port_attr port;

    if (ibv_query_port(verbs, 1, &port)) {
        error_report("Failed to query port information");
        return;
    }

    printf("%s RDMA Device opened: kernel name %s "
           "uverbs device name %s, "
           "infiniband_verbs class device path %s, "
           "infiniband class device path %s, "
           "transport: (%d) %s\n",
                who,
                verbs->device->name,
                verbs->device->dev_name,
                verbs->device->dev_path,
                verbs->device->ibdev_path,
                port.link_layer,
                (port.link_layer == IBV_LINK_LAYER_INFINIBAND) ? "Infiniband" :
                 ((port.link_layer == IBV_LINK_LAYER_ETHERNET)
                    ? "Ethernet" : "Unknown"));
}
/*
 * Put in the log file the RDMA gid addressing information,
 * useful for folks who have trouble understanding the
 * RDMA device hierarchy in the kernel.
 */
static void qemu_rdma_dump_gid(const char *who, struct rdma_cm_id *id)
{
    char sgid[33];
    char dgid[33];
    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.sgid, sgid, sizeof sgid);
    inet_ntop(AF_INET6, &id->route.addr.addr.ibaddr.dgid, dgid, sizeof dgid);
    trace_qemu_rdma_dump_gid(who, sgid, dgid);
}
/*
 * As of now, IPv6 over RoCE / iWARP is not supported by linux.
 * We will try the next addrinfo struct, and fail if there are
 * no other valid addresses to bind against.
 *
 * If user is listening on '[::]', then we will not have opened a device
 * yet and have no way of verifying if the device is RoCE or not.
 *
 * In this case, the source VM will throw an error for ALL types of
 * connections (both IPv4 and IPv6) if the destination machine does not have
 * a regular infiniband network available for use.
 *
 * The only way to guarantee that an error is thrown for broken kernels is
 * for the management software to choose a *specific* interface at bind time
 * and validate what type of hardware it is.
 *
 * Unfortunately, this puts the user in a fix:
 *
 *  If the source VM connects with an IPv4 address without knowing that the
 *  destination has bound to '[::]' the migration will unconditionally fail
 *  unless the management software is explicitly listening on the IPv4
 *  address while using a RoCE-based device.
 *
 *  If the source VM connects with an IPv6 address, then we're OK because we can
 *  throw an error on the source (and similarly on the destination).
 *
 *  But in mixed environments, this will be broken for a while until it is fixed
 *  inside linux.
 *
 * We do provide a *tiny* bit of help in this function: We can list all of the
 * devices in the system and check to see if all the devices are RoCE or
 * Infiniband.
 *
 * If we detect that we have a *pure* RoCE environment, then we can safely
 * throw an error even if the management software has specified '[::]' as the
 * bind address.
 *
 * However, if there are multiple heterogeneous devices, then we cannot make
 * this assumption and the user just has to be sure they know what they are
 * doing.
 *
 * Patches are being reviewed on linux-rdma.
 */
static int qemu_rdma_broken_ipv6_kernel(struct ibv_context *verbs, Error **errp)
{
    /* This bug only exists in linux, to our knowledge. */
#ifdef CONFIG_LINUX
    struct ibv_port_attr port_attr;

    /*
     * Verbs are only NULL if management has bound to '[::]'.
     *
     * Let's iterate through all the devices and see if there are any pure IB
     * devices (non-ethernet).
     *
     * If not, then we can safely proceed with the migration.
     * Otherwise, there are no guarantees until the bug is fixed in linux.
     */
    if (!verbs) {
        int num_devices, x;
        struct ibv_device **dev_list = ibv_get_device_list(&num_devices);
        bool roce_found = false;
        bool ib_found = false;

        for (x = 0; x < num_devices; x++) {
            verbs = ibv_open_device(dev_list[x]);
            /*
             * ibv_open_device() is not documented to set errno.  If
             * it does, it's somebody else's doc bug.  If it doesn't,
             * the use of errno below is wrong.
             * TODO Find out whether ibv_open_device() sets errno.
             */
            if (!verbs) {
                if (errno == EPERM) {
                    continue;
                } else {
                    return -EINVAL;
                }
            }

            if (ibv_query_port(verbs, 1, &port_attr)) {
                ibv_close_device(verbs);
                ERROR(errp, "Could not query initial IB port");
                return -EINVAL;
            }

            if (port_attr.link_layer == IBV_LINK_LAYER_INFINIBAND) {
                ib_found = true;
            } else if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
                roce_found = true;
            }

            ibv_close_device(verbs);
        }

        if (roce_found) {
            if (ib_found) {
                fprintf(stderr, "WARN: migrations may fail:"
                                " IPv6 over RoCE / iWARP in linux"
                                " is broken. But since you appear to have a"
                                " mixed RoCE / IB environment, be sure to only"
                                " migrate over the IB fabric until the kernel "
                                " fixes the bug.\n");
            } else {
                ERROR(errp, "You only have RoCE / iWARP devices in your systems"
                            " and your management software has specified '[::]'"
                            ", but IPv6 over RoCE / iWARP is not supported in Linux.");
                return -ENONET;
            }
        }

        return 0;
    }

    /*
     * If we have a verbs context, that means that some other than '[::]' was
     * used by the management software for binding. In which case we can
     * actually warn the user about a potentially broken kernel.
     */

    /* IB ports start with 1, not 0 */
    if (ibv_query_port(verbs, 1, &port_attr)) {
        ERROR(errp, "Could not query initial IB port");
        return -EINVAL;
    }

    if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
        ERROR(errp, "Linux kernel's RoCE / iWARP does not support IPv6 "
                    "(but patches on linux-rdma in progress)");
        return -ENONET;
    }

#endif

    return 0;
}
/*
 * Figure out which RDMA device corresponds to the requested IP hostname
 * Also create the initial connection manager identifiers for opening
 * the connection.
 */
static int qemu_rdma_resolve_host(RDMAContext *rdma, Error **errp)
{
    int ret;
    struct rdma_addrinfo *res;
    char port_str[16];
    struct rdma_cm_event *cm_event;
    char ip[40] = "unknown";
    struct rdma_addrinfo *e;

    if (rdma->host == NULL || !strcmp(rdma->host, "")) {
        ERROR(errp, "RDMA hostname has not been set");
        return -EINVAL;
    }

    /* create CM channel */
    rdma->channel = rdma_create_event_channel();
    if (!rdma->channel) {
        ERROR(errp, "could not create CM channel");
        return -EINVAL;
    }

    /* create CM id */
    ret = rdma_create_id(rdma->channel, &rdma->cm_id, NULL, RDMA_PS_TCP);
    if (ret) {
        ERROR(errp, "could not create channel id");
        goto err_resolve_create_id;
    }

    snprintf(port_str, 16, "%d", rdma->port);
    port_str[15] = '\0';

    ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
    if (ret) {
        ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
        goto err_resolve_get_addr;
    }

    for (e = res; e != NULL; e = e->ai_next) {
        inet_ntop(e->ai_family,
            &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
        trace_qemu_rdma_resolve_host_trying(rdma->host, ip);

        ret = rdma_resolve_addr(rdma->cm_id, NULL, e->ai_dst_addr,
                RDMA_RESOLVE_TIMEOUT_MS);
        if (!ret) {
            if (e->ai_family == AF_INET6) {
                ret = qemu_rdma_broken_ipv6_kernel(rdma->cm_id->verbs, errp);
                if (ret) {
                    continue;
                }
            }
            goto route;
        }
    }

    rdma_freeaddrinfo(res);
    ERROR(errp, "could not resolve address %s", rdma->host);
    goto err_resolve_get_addr;

route:
    rdma_freeaddrinfo(res);
    qemu_rdma_dump_gid("source_resolve_addr", rdma->cm_id);

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        ERROR(errp, "could not perform event_addr_resolved");
        goto err_resolve_get_addr;
    }

    if (cm_event->event != RDMA_CM_EVENT_ADDR_RESOLVED) {
        ERROR(errp, "result not equal to event_addr_resolved %s",
                rdma_event_str(cm_event->event));
        error_report("rdma_resolve_addr");
        rdma_ack_cm_event(cm_event);
        ret = -EINVAL;
        goto err_resolve_get_addr;
    }
    rdma_ack_cm_event(cm_event);

    /* resolve route */
    ret = rdma_resolve_route(rdma->cm_id, RDMA_RESOLVE_TIMEOUT_MS);
    if (ret) {
        ERROR(errp, "could not resolve rdma route");
        goto err_resolve_get_addr;
    }

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        ERROR(errp, "could not perform event_route_resolved");
        goto err_resolve_get_addr;
    }
    if (cm_event->event != RDMA_CM_EVENT_ROUTE_RESOLVED) {
        ERROR(errp, "result not equal to event_route_resolved: %s",
                rdma_event_str(cm_event->event));
        rdma_ack_cm_event(cm_event);
        ret = -EINVAL;
        goto err_resolve_get_addr;
    }
    rdma_ack_cm_event(cm_event);
    rdma->verbs = rdma->cm_id->verbs;
    qemu_rdma_dump_id("source_resolve_host", rdma->cm_id->verbs);
    qemu_rdma_dump_gid("source_resolve_host", rdma->cm_id);
    return 0;

err_resolve_get_addr:
    rdma_destroy_id(rdma->cm_id);
    rdma->cm_id = NULL;
err_resolve_create_id:
    rdma_destroy_event_channel(rdma->channel);
    rdma->channel = NULL;
    return ret;
}
/*
 * Create protection domain and completion queues
 */
static int qemu_rdma_alloc_pd_cq(RDMAContext *rdma)
{
    /* allocate pd */
    rdma->pd = ibv_alloc_pd(rdma->verbs);
    if (!rdma->pd) {
        error_report("failed to allocate protection domain");
        return -1;
    }

    /* create receive completion channel */
    rdma->recv_comp_channel = ibv_create_comp_channel(rdma->verbs);
    if (!rdma->recv_comp_channel) {
        error_report("failed to allocate receive completion channel");
        goto err_alloc_pd_cq;
    }

    /*
     * Completion queue can be filled by read work requests.
     */
    rdma->recv_cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3),
                                  NULL, rdma->recv_comp_channel, 0);
    if (!rdma->recv_cq) {
        error_report("failed to allocate receive completion queue");
        goto err_alloc_pd_cq;
    }

    /* create send completion channel */
    rdma->send_comp_channel = ibv_create_comp_channel(rdma->verbs);
    if (!rdma->send_comp_channel) {
        error_report("failed to allocate send completion channel");
        goto err_alloc_pd_cq;
    }

    rdma->send_cq = ibv_create_cq(rdma->verbs, (RDMA_SIGNALED_SEND_MAX * 3),
                                  NULL, rdma->send_comp_channel, 0);
    if (!rdma->send_cq) {
        error_report("failed to allocate send completion queue");
        goto err_alloc_pd_cq;
    }

    return 0;

err_alloc_pd_cq:
    if (rdma->pd) {
        ibv_dealloc_pd(rdma->pd);
    }
    if (rdma->recv_comp_channel) {
        ibv_destroy_comp_channel(rdma->recv_comp_channel);
    }
    if (rdma->send_comp_channel) {
        ibv_destroy_comp_channel(rdma->send_comp_channel);
    }
    if (rdma->recv_cq) {
        ibv_destroy_cq(rdma->recv_cq);
        rdma->recv_cq = NULL;
    }
    rdma->pd = NULL;
    rdma->recv_comp_channel = NULL;
    rdma->send_comp_channel = NULL;
    return -1;
}
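/*
 * A note on sizing: the completion queues above are created with room
 * for RDMA_SIGNALED_SEND_MAX * 3 entries.  The code does not say why;
 * presumably the factor of 3 leaves headroom for control-channel
 * SEND/RECV completions on top of a full window of signaled RDMA
 * writes.
 */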
/*
 * Create queue pairs.
 */
static int qemu_rdma_alloc_qp(RDMAContext *rdma)
{
    struct ibv_qp_init_attr attr = { 0 };
    int ret;

    attr.cap.max_send_wr = RDMA_SIGNALED_SEND_MAX;
    attr.cap.max_recv_wr = 3;
    attr.cap.max_send_sge = 1;
    attr.cap.max_recv_sge = 1;
    attr.send_cq = rdma->send_cq;
    attr.recv_cq = rdma->recv_cq;
    attr.qp_type = IBV_QPT_RC;

    ret = rdma_create_qp(rdma->cm_id, rdma->pd, &attr);
    if (ret) {
        return -1;
    }

    rdma->qp = rdma->cm_id->qp;
    return 0;
}
/* Check whether On-Demand Paging is supported by the RDMA device */
static bool rdma_support_odp(struct ibv_context *dev)
{
    struct ibv_device_attr_ex attr = {0};
    int ret = ibv_query_device_ex(dev, NULL, &attr);
    if (ret) {
        return false;
    }

    if (attr.odp_caps.general_caps & IBV_ODP_SUPPORT) {
        return true;
    }

    return false;
}
/*
 * ibv_advise_mr to avoid RNR NAK error as far as possible.
 * The responder mr registering with ODP will send an RNR NAK back to
 * the requester in the face of the page fault.
 */
static void qemu_rdma_advise_prefetch_mr(struct ibv_pd *pd, uint64_t addr,
                                         uint32_t len,  uint32_t lkey,
                                         const char *name, bool wr)
{
#ifdef HAVE_IBV_ADVISE_MR
    int ret;
    int advice = wr ? IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE :
                 IBV_ADVISE_MR_ADVICE_PREFETCH;
    struct ibv_sge sg_list = {.lkey = lkey, .addr = addr, .length = len};

    ret = ibv_advise_mr(pd, advice,
                        IBV_ADVISE_MR_FLAG_FLUSH, &sg_list, 1);
    /* ignore the error */
    trace_qemu_rdma_advise_mr(name, len, addr, strerror(ret));
#endif
}
static int qemu_rdma_reg_whole_ram_blocks(RDMAContext *rdma)
{
    int i;
    RDMALocalBlocks *local = &rdma->local_ram_blocks;

    for (i = 0; i < local->nb_blocks; i++) {
        int access = IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE;

        local->block[i].mr =
            ibv_reg_mr(rdma->pd,
                       local->block[i].local_host_addr,
                       local->block[i].length, access);
        /*
         * ibv_reg_mr() is not documented to set errno.  If it does,
         * it's somebody else's doc bug.  If it doesn't, the use of
         * errno below is wrong.
         * TODO Find out whether ibv_reg_mr() sets errno.
         */
        if (!local->block[i].mr &&
            errno == ENOTSUP && rdma_support_odp(rdma->verbs)) {
            access |= IBV_ACCESS_ON_DEMAND;
            /* register ODP mr */
            local->block[i].mr =
                ibv_reg_mr(rdma->pd,
                           local->block[i].local_host_addr,
                           local->block[i].length, access);
            trace_qemu_rdma_register_odp_mr(local->block[i].block_name);

            if (local->block[i].mr) {
                qemu_rdma_advise_prefetch_mr(rdma->pd,
                                (uintptr_t)local->block[i].local_host_addr,
                                local->block[i].length,
                                local->block[i].mr->lkey,
                                local->block[i].block_name,
                                true);
            }
        }

        if (!local->block[i].mr) {
            perror("Failed to register local dest ram block!");
            break;
        }
        rdma->total_registrations++;
    }

    if (i >= local->nb_blocks) {
        return 0;
    }

    for (i--; i >= 0; i--) {
        ibv_dereg_mr(local->block[i].mr);
        local->block[i].mr = NULL;
        rdma->total_registrations--;
    }

    return -1;
}
/*
 * Find the ram block that corresponds to the page requested to be
 * transmitted by QEMU.
 *
 * Once the block is found, also identify which 'chunk' within that
 * block the page belongs to.
 */
static void qemu_rdma_search_ram_block(RDMAContext *rdma,
                                       uintptr_t block_offset,
                                       uint64_t offset,
                                       uint64_t length,
                                       uint64_t *block_index,
                                       uint64_t *chunk_index)
{
    uint64_t current_addr = block_offset + offset;
    RDMALocalBlock *block = g_hash_table_lookup(rdma->blockmap,
                                                (void *) block_offset);
    assert(block);
    assert(current_addr >= block->offset);
    assert((current_addr + length) <= (block->offset + block->length));

    *block_index = block->index;
    *chunk_index = ram_chunk_index(block->local_host_addr,
                block->local_host_addr + (current_addr - block->offset));
}
/*
 * Register a chunk with IB. If the chunk was already registered
 * previously, then skip.
 *
 * Also return the keys associated with the registration needed
 * to perform the actual RDMA operation.
 */
static int qemu_rdma_register_and_get_keys(RDMAContext *rdma,
        RDMALocalBlock *block, uintptr_t host_addr,
        uint32_t *lkey, uint32_t *rkey, int chunk,
        uint8_t *chunk_start, uint8_t *chunk_end)
{
    if (block->mr) {
        if (lkey) {
            *lkey = block->mr->lkey;
        }
        if (rkey) {
            *rkey = block->mr->rkey;
        }
        return 0;
    }

    /* allocate memory to store chunk MRs */
    if (!block->pmr) {
        block->pmr = g_new0(struct ibv_mr *, block->nb_chunks);
    }

    /*
     * If 'rkey', then we're the destination, so grant access to the source.
     *
     * If 'lkey', then we're the source VM, so grant access only to ourselves.
     */
    if (!block->pmr[chunk]) {
        uint64_t len = chunk_end - chunk_start;
        int access = rkey ? IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE :
                     0;

        trace_qemu_rdma_register_and_get_keys(len, chunk_start);

        block->pmr[chunk] = ibv_reg_mr(rdma->pd, chunk_start, len, access);
        /*
         * ibv_reg_mr() is not documented to set errno.  If it does,
         * it's somebody else's doc bug.  If it doesn't, the use of
         * errno below is wrong.
         * TODO Find out whether ibv_reg_mr() sets errno.
         */
        if (!block->pmr[chunk] &&
            errno == ENOTSUP && rdma_support_odp(rdma->verbs)) {
            access |= IBV_ACCESS_ON_DEMAND;
            /* register ODP mr */
            block->pmr[chunk] = ibv_reg_mr(rdma->pd, chunk_start, len, access);
            trace_qemu_rdma_register_odp_mr(block->block_name);

            if (block->pmr[chunk]) {
                qemu_rdma_advise_prefetch_mr(rdma->pd, (uintptr_t)chunk_start,
                                             len, block->pmr[chunk]->lkey,
                                             block->block_name, rkey);
            }
        }
    }
    if (!block->pmr[chunk]) {
        perror("Failed to register chunk!");
        fprintf(stderr, "Chunk details: block: %d chunk index %d"
                        " start %" PRIuPTR " end %" PRIuPTR
                        " host %" PRIuPTR
                        " local %" PRIuPTR " registrations: %d\n",
                        block->index, chunk, (uintptr_t)chunk_start,
                        (uintptr_t)chunk_end, host_addr,
                        (uintptr_t)block->local_host_addr,
                        rdma->total_registrations);
        return -1;
    }
    rdma->total_registrations++;

    if (lkey) {
        *lkey = block->pmr[chunk]->lkey;
    }
    if (rkey) {
        *rkey = block->pmr[chunk]->rkey;
    }
    return 0;
}
/*
 * Register (at connection time) the memory used for control
 * channel messages.
 */
static int qemu_rdma_reg_control(RDMAContext *rdma, int idx)
{
    rdma->wr_data[idx].control_mr = ibv_reg_mr(rdma->pd,
            rdma->wr_data[idx].control, RDMA_CONTROL_MAX_BUFFER,
            IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE);
    if (rdma->wr_data[idx].control_mr) {
        rdma->total_registrations++;
        return 0;
    }
    error_report("qemu_rdma_reg_control failed");
    return -1;
}
/*
 * Perform a non-optimized memory unregistration after every transfer
 * for demonstration purposes, only if pin-all is not requested.
 *
 * Potential optimizations:
 * 1. Start a new thread to run this function continuously
        - for bit clearing
        - and for receipt of unregister messages
 * 2. Use an LRU.
 * 3. Use workload hints.
 */
static int qemu_rdma_unregister_waiting(RDMAContext *rdma)
{
    while (rdma->unregistrations[rdma->unregister_current]) {
        int ret;
        uint64_t wr_id = rdma->unregistrations[rdma->unregister_current];
        uint64_t chunk =
            (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
        uint64_t index =
            (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
        RDMALocalBlock *block =
            &(rdma->local_ram_blocks.block[index]);
        RDMARegister reg = { .current_index = index };
        RDMAControlHeader resp = { .type = RDMA_CONTROL_UNREGISTER_FINISHED,
                                 };
        RDMAControlHeader head = { .len = sizeof(RDMARegister),
                                   .type = RDMA_CONTROL_UNREGISTER_REQUEST,
                                   .repeat = 1,
                                 };

        trace_qemu_rdma_unregister_waiting_proc(chunk,
                                                rdma->unregister_current);

        rdma->unregistrations[rdma->unregister_current] = 0;
        rdma->unregister_current++;

        if (rdma->unregister_current == RDMA_SIGNALED_SEND_MAX) {
            rdma->unregister_current = 0;
        }


        /*
         * Unregistration is speculative (because migration is single-threaded
         * and we cannot break the protocol's infiniband message ordering).
         * Thus, if the memory is currently being used for transmission,
         * then abort the attempt to unregister and try again
         * later the next time a completion is received for this memory.
         */

        clear_bit(chunk, block->unregister_bitmap);

        if (test_bit(chunk, block->transit_bitmap)) {
            trace_qemu_rdma_unregister_waiting_inflight(chunk);
            continue;
        }

        trace_qemu_rdma_unregister_waiting_send(chunk);

        ret = ibv_dereg_mr(block->pmr[chunk]);
        block->pmr[chunk] = NULL;
        block->remote_keys[chunk] = 0;

        if (ret != 0) {
            /*
             * FIXME perror() is problematic, because ibv_dereg_mr() is
             * not documented to set errno.  Will go away later in
             * this series.
             */
            perror("unregistration chunk failed");
            return -ret;
        }
        rdma->total_registrations--;

        reg.key.chunk = chunk;
        register_to_network(rdma, &reg);
        ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
                                      &resp, NULL, NULL);
        if (ret < 0) {
            return ret;
        }

        trace_qemu_rdma_unregister_waiting_complete(chunk);
    }

    return 0;
}
static uint64_t qemu_rdma_make_wrid(uint64_t wr_id, uint64_t index,
                                    uint64_t chunk)
{
    uint64_t result = wr_id & RDMA_WRID_TYPE_MASK;

    result |= (index << RDMA_WRID_BLOCK_SHIFT);
    result |= (chunk << RDMA_WRID_CHUNK_SHIFT);

    return result;
}
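/*
 * qemu_rdma_make_wrid() is the inverse of the decoding done in
 * qemu_rdma_poll() below: for instance,
 * qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE, index, chunk) produces a
 * wr_id from which (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT
 * recovers 'index' and (wr_id & RDMA_WRID_CHUNK_MASK) >>
 * RDMA_WRID_CHUNK_SHIFT recovers 'chunk'.
 */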
/*
 * Consult the connection manager to see if a work request
 * (of any kind) has completed.
 * Return the work request ID that completed.
 */
static int qemu_rdma_poll(RDMAContext *rdma, struct ibv_cq *cq,
                          uint64_t *wr_id_out, uint32_t *byte_len)
{
    int ret;
    struct ibv_wc wc;
    uint64_t wr_id;

    ret = ibv_poll_cq(cq, 1, &wc);

    if (!ret) {
        *wr_id_out = RDMA_WRID_NONE;
        return 0;
    }

    if (ret < 0) {
        error_report("ibv_poll_cq failed");
        return ret;
    }

    wr_id = wc.wr_id & RDMA_WRID_TYPE_MASK;

    if (wc.status != IBV_WC_SUCCESS) {
        fprintf(stderr, "ibv_poll_cq wc.status=%d %s!\n",
                        wc.status, ibv_wc_status_str(wc.status));
        fprintf(stderr, "ibv_poll_cq wrid=%" PRIu64 "!\n", wr_id);

        return -1;
    }

    if (rdma->control_ready_expected &&
        (wr_id >= RDMA_WRID_RECV_CONTROL)) {
        trace_qemu_rdma_poll_recv(wr_id - RDMA_WRID_RECV_CONTROL, wr_id,
                                  rdma->nb_sent);
        rdma->control_ready_expected = 0;
    }

    if (wr_id == RDMA_WRID_RDMA_WRITE) {
        uint64_t chunk =
            (wc.wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
        uint64_t index =
            (wc.wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
        RDMALocalBlock *block = &(rdma->local_ram_blocks.block[index]);

        trace_qemu_rdma_poll_write(wr_id, rdma->nb_sent,
                                   index, chunk, block->local_host_addr,
                                   (void *)(uintptr_t)block->remote_host_addr);

        clear_bit(chunk, block->transit_bitmap);

        if (rdma->nb_sent > 0) {
            rdma->nb_sent--;
        }
    } else {
        trace_qemu_rdma_poll_other(wr_id, rdma->nb_sent);
    }

    *wr_id_out = wc.wr_id;
    if (byte_len) {
        *byte_len = wc.byte_len;
    }

    return 0;
}
/* Wait for activity on the completion channel.
 * Returns 0 on success, non-0 on error.
 */
static int qemu_rdma_wait_comp_channel(RDMAContext *rdma,
                                       struct ibv_comp_channel *comp_channel)
{
    struct rdma_cm_event *cm_event;
    int ret;

    /*
     * Coroutine doesn't start until migration_fd_process_incoming()
     * so don't yield unless we know we're running inside of a coroutine.
     */
    if (rdma->migration_started_on_destination &&
        migration_incoming_get_current()->state == MIGRATION_STATUS_ACTIVE) {
        yield_until_fd_readable(comp_channel->fd);
    } else {
        /* This is the source side, we're in a separate thread
         * or destination prior to migration_fd_process_incoming()
         * after postcopy, the destination also in a separate thread.
         * we can't yield; so we have to poll the fd.
         * But we need to be able to handle 'cancel' or an error
         * without hanging forever.
         */
        while (!rdma->error_state && !rdma->received_error) {
            GPollFD pfds[2];
            pfds[0].fd = comp_channel->fd;
            pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;
            pfds[0].revents = 0;

            pfds[1].fd = rdma->channel->fd;
            pfds[1].events = G_IO_IN | G_IO_HUP | G_IO_ERR;
            pfds[1].revents = 0;

            /* 0.1s timeout, should be fine for a 'cancel' */
            switch (qemu_poll_ns(pfds, 2, 100 * 1000 * 1000)) {
            case 2:
            case 1: /* fd active */
                if (pfds[0].revents) {
                    return 0;
                }

                if (pfds[1].revents) {
                    ret = rdma_get_cm_event(rdma->channel, &cm_event);
                    if (ret) {
                        error_report("failed to get cm event while wait "
                                     "completion channel");
                        return -EPIPE;
                    }

                    error_report("receive cm event while wait comp channel,"
                                 "cm event is %d", cm_event->event);
                    if (cm_event->event == RDMA_CM_EVENT_DISCONNECTED ||
                        cm_event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
                        rdma_ack_cm_event(cm_event);
                        return -EPIPE;
                    }
                    rdma_ack_cm_event(cm_event);
                }
                break;

            case 0: /* Timeout, go around again */
                break;

            default: /* Error of some type -
                      * I don't trust errno from qemu_poll_ns
                      */
                error_report("%s: poll failed", __func__);
                return -EPIPE;
            }

            if (migrate_get_current()->state == MIGRATION_STATUS_CANCELLING) {
                /* Bail out and let the cancellation happen */
                return -EPIPE;
            }
        }
    }

    if (rdma->received_error) {
        return -EPIPE;
    }
    return rdma->error_state;
}
static struct ibv_comp_channel *to_channel(RDMAContext *rdma, uint64_t wrid)
{
    return wrid < RDMA_WRID_RECV_CONTROL ? rdma->send_comp_channel :
           rdma->recv_comp_channel;
}

static struct ibv_cq *to_cq(RDMAContext *rdma, uint64_t wrid)
{
    return wrid < RDMA_WRID_RECV_CONTROL ? rdma->send_cq : rdma->recv_cq;
}
/*
 * Block until the next work request has completed.
 *
 * First poll to see if a work request has already completed,
 * otherwise block.
 *
 * If we encounter completed work requests for IDs other than
 * the one we're interested in, then that's generally an error.
 *
 * The only exception is actual RDMA Write completions. These
 * completions only need to be recorded, but do not actually
 * need further processing.
 */
static int qemu_rdma_block_for_wrid(RDMAContext *rdma,
                                    uint64_t wrid_requested,
                                    uint32_t *byte_len)
{
    int num_cq_events = 0, ret = 0;
    struct ibv_cq *cq;
    void *cq_ctx;
    uint64_t wr_id = RDMA_WRID_NONE, wr_id_in;
    struct ibv_comp_channel *ch = to_channel(rdma, wrid_requested);
    struct ibv_cq *poll_cq = to_cq(rdma, wrid_requested);

    if (ibv_req_notify_cq(poll_cq, 0)) {
        return -1;
    }
    while (wr_id != wrid_requested) {
        ret = qemu_rdma_poll(rdma, poll_cq, &wr_id_in, byte_len);
        if (ret < 0) {
            return ret;
        }

        wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;

        if (wr_id == RDMA_WRID_NONE) {
            break;
        }
        if (wr_id != wrid_requested) {
            trace_qemu_rdma_block_for_wrid_miss(wrid_requested, wr_id);
        }
    }

    if (wr_id == wrid_requested) {
        return 0;
    }

    while (1) {
        ret = qemu_rdma_wait_comp_channel(rdma, ch);
        if (ret) {
            goto err_block_for_wrid;
        }

        ret = ibv_get_cq_event(ch, &cq, &cq_ctx);
        if (ret) {
            /*
             * FIXME perror() is problematic, because ibv_get_cq_event() is
             * not documented to set errno.  Will go away later in
             * this series.
             */
            perror("ibv_get_cq_event");
            goto err_block_for_wrid;
        }

        num_cq_events++;

        ret = -ibv_req_notify_cq(cq, 0);
        if (ret) {
            goto err_block_for_wrid;
        }

        while (wr_id != wrid_requested) {
            ret = qemu_rdma_poll(rdma, poll_cq, &wr_id_in, byte_len);
            if (ret < 0) {
                goto err_block_for_wrid;
            }

            wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;

            if (wr_id == RDMA_WRID_NONE) {
                break;
            }
            if (wr_id != wrid_requested) {
                trace_qemu_rdma_block_for_wrid_miss(wrid_requested, wr_id);
            }
        }

        if (wr_id == wrid_requested) {
            goto success_block_for_wrid;
        }
    }

success_block_for_wrid:
    if (num_cq_events) {
        ibv_ack_cq_events(cq, num_cq_events);
    }
    return 0;

err_block_for_wrid:
    if (num_cq_events) {
        ibv_ack_cq_events(cq, num_cq_events);
    }

    rdma->error_state = ret;
    return ret;
}
/*
 * Post a SEND message work request for the control channel
 * containing some data and block until the post completes.
 */
static int qemu_rdma_post_send_control(RDMAContext *rdma, uint8_t *buf,
                                       RDMAControlHeader *head)
{
    int ret;
    RDMAWorkRequestData *wr = &rdma->wr_data[RDMA_WRID_CONTROL];
    struct ibv_send_wr *bad_wr;
    struct ibv_sge sge = {
                           .addr = (uintptr_t)(wr->control),
                           .length = head->len + sizeof(RDMAControlHeader),
                           .lkey = wr->control_mr->lkey,
                         };
    struct ibv_send_wr send_wr = {
                                   .wr_id = RDMA_WRID_SEND_CONTROL,
                                   .opcode = IBV_WR_SEND,
                                   .send_flags = IBV_SEND_SIGNALED,
                                   .sg_list = &sge,
                                   .num_sge = 1,
                                 };

    trace_qemu_rdma_post_send_control(control_desc(head->type));

    /*
     * We don't actually need to do a memcpy() in here if we used
     * the "sge" properly, but since we're only sending control messages
     * (not RAM in a performance-critical path), then it's OK for now.
     *
     * The copy makes the RDMAControlHeader simpler to manipulate
     * for the time being.
     */
    assert(head->len <= RDMA_CONTROL_MAX_BUFFER - sizeof(*head));
    memcpy(wr->control, head, sizeof(RDMAControlHeader));
    control_to_network((void *) wr->control);

    if (buf) {
        memcpy(wr->control + sizeof(RDMAControlHeader), buf, head->len);
    }


    ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);

    if (ret > 0) {
        error_report("Failed to use post IB SEND for control");
        return -ret;
    }

    ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_SEND_CONTROL, NULL);
    if (ret < 0) {
        error_report("rdma migration: send polling control error");
    }

    return ret;
}
/*
 * Post a RECV work request in anticipation of some future receipt
 * of data on the control channel.
 */
static int qemu_rdma_post_recv_control(RDMAContext *rdma, int idx)
{
    struct ibv_recv_wr *bad_wr;
    struct ibv_sge sge = {
                            .addr = (uintptr_t)(rdma->wr_data[idx].control),
                            .length = RDMA_CONTROL_MAX_BUFFER,
                            .lkey = rdma->wr_data[idx].control_mr->lkey,
                         };

    struct ibv_recv_wr recv_wr = {
                                    .wr_id = RDMA_WRID_RECV_CONTROL + idx,
                                    .sg_list = &sge,
                                    .num_sge = 1,
                                 };


    if (ibv_post_recv(rdma->qp, &recv_wr, &bad_wr)) {
        return -1;
    }

    return 0;
}
/*
 * Block and wait for a RECV control channel message to arrive.
 */
static int qemu_rdma_exchange_get_response(RDMAContext *rdma,
                RDMAControlHeader *head, uint32_t expecting, int idx)
{
    uint32_t byte_len;
    int ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RECV_CONTROL + idx,
                                       &byte_len);

    if (ret < 0) {
        error_report("rdma migration: recv polling control error!");
        return ret;
    }

    network_to_control((void *) rdma->wr_data[idx].control);
    memcpy(head, rdma->wr_data[idx].control, sizeof(RDMAControlHeader));

    trace_qemu_rdma_exchange_get_response_start(control_desc(expecting));

    if (expecting == RDMA_CONTROL_NONE) {
        trace_qemu_rdma_exchange_get_response_none(control_desc(head->type),
                                                   head->type);
    } else if (head->type != expecting || head->type == RDMA_CONTROL_ERROR) {
        error_report("Was expecting a %s (%d) control message"
                ", but got: %s (%d), length: %d",
                control_desc(expecting), expecting,
                control_desc(head->type), head->type, head->len);
        if (head->type == RDMA_CONTROL_ERROR) {
            rdma->received_error = true;
        }
        return -EIO;
    }
    if (head->len > RDMA_CONTROL_MAX_BUFFER - sizeof(*head)) {
        error_report("too long length: %d", head->len);
        return -EINVAL;
    }
    if (sizeof(*head) + head->len != byte_len) {
        error_report("Malformed length: %d byte_len %d", head->len, byte_len);
        return -EINVAL;
    }

    return 0;
}
/*
 * When a RECV work request has completed, the work request's
 * buffer is pointed at the header.
 *
 * This will advance the pointer to the data portion
 * of the control message of the work request's buffer that
 * was populated after the work request finished.
 */
static void qemu_rdma_move_header(RDMAContext *rdma, int idx,
                                  RDMAControlHeader *head)
{
    rdma->wr_data[idx].control_len = head->len;
    rdma->wr_data[idx].control_curr =
        rdma->wr_data[idx].control + sizeof(RDMAControlHeader);
}
/*
 * This is an 'atomic' high-level operation to deliver a single, unified
 * control-channel message.
 *
 * Additionally, if the user is expecting some kind of reply to this message,
 * they can request a 'resp' response message be filled in by posting an
 * additional work request on behalf of the user and waiting for an additional
 * completion.
 *
 * The extra (optional) response is used during registration to save us from
 * having to perform an *additional* exchange of messages just to provide a
 * response by instead piggy-backing on the acknowledgement.
 */
static int qemu_rdma_exchange_send(RDMAContext *rdma, RDMAControlHeader *head,
                                   uint8_t *data, RDMAControlHeader *resp,
                                   int *resp_idx,
                                   int (*callback)(RDMAContext *rdma))
{
    int ret = 0;

    /*
     * Wait until the dest is ready before attempting to deliver the message
     * by waiting for a READY message.
     */
    if (rdma->control_ready_expected) {
        RDMAControlHeader resp_ignored;

        ret = qemu_rdma_exchange_get_response(rdma, &resp_ignored,
                                              RDMA_CONTROL_READY,
                                              RDMA_WRID_READY);
        if (ret < 0) {
            return ret;
        }
    }

    /*
     * If the user is expecting a response, post a WR in anticipation of it.
     */
    if (resp) {
        ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_DATA);
        if (ret) {
            error_report("rdma migration: error posting"
                    " extra control recv for anticipated result!");
            return ret;
        }
    }

    /*
     * Post a WR to replace the one we just consumed for the READY message.
     */
    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
    if (ret) {
        error_report("rdma migration: error posting first control recv!");
        return ret;
    }

    /*
     * Deliver the control message that was requested.
     */
    ret = qemu_rdma_post_send_control(rdma, data, head);

    if (ret < 0) {
        error_report("Failed to send control buffer!");
        return ret;
    }

    /*
     * If we're expecting a response, block and wait for it.
     */
    if (resp) {
        if (callback) {
            trace_qemu_rdma_exchange_send_issue_callback();
            ret = callback(rdma);
            if (ret < 0) {
                return ret;
            }
        }

        trace_qemu_rdma_exchange_send_waiting(control_desc(resp->type));
        ret = qemu_rdma_exchange_get_response(rdma, resp,
                                              resp->type, RDMA_WRID_DATA);

        if (ret < 0) {
            return ret;
        }

        qemu_rdma_move_header(rdma, RDMA_WRID_DATA, resp);
        if (resp_idx) {
            *resp_idx = RDMA_WRID_DATA;
        }
        trace_qemu_rdma_exchange_send_received(control_desc(resp->type));
    }

    rdma->control_ready_expected = 1;

    return 0;
}
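/*
 * A sketch of the handshake implemented by qemu_rdma_exchange_send()
 * above and qemu_rdma_exchange_recv() below:
 *
 *    source                                 destination
 *    ------                                 -----------
 *    wait for READY         <-------------  send READY
 *    repost READY recv
 *    send the message       ------------->  RECV completes, consume data
 *    [optional]
 *    wait for response      <-------------  [optional] send response
 *    in the DATA slot
 */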
/*
 * This is an 'atomic' high-level operation to receive a single, unified
 * control-channel message.
 */
static int qemu_rdma_exchange_recv(RDMAContext *rdma, RDMAControlHeader *head,
                                   uint32_t expecting)
{
    RDMAControlHeader ready = {
                                .len = 0,
                                .type = RDMA_CONTROL_READY,
                                .repeat = 1,
                              };
    int ret;

    /*
     * Inform the source that we're ready to receive a message.
     */
    ret = qemu_rdma_post_send_control(rdma, NULL, &ready);

    if (ret < 0) {
        error_report("Failed to send control buffer!");
        return ret;
    }

    /*
     * Block and wait for the message.
     */
    ret = qemu_rdma_exchange_get_response(rdma, head,
                                          expecting, RDMA_WRID_READY);

    if (ret < 0) {
        return ret;
    }

    qemu_rdma_move_header(rdma, RDMA_WRID_READY, head);

    /*
     * Post a new RECV work request to replace the one we just consumed.
     */
    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
    if (ret) {
        error_report("rdma migration: error posting second control recv!");
        return ret;
    }

    return 0;
}
2025 * If we're using dynamic registration on the dest-side, we have to
2026 * send a registration command first.
2028 static int qemu_rdma_write_one(RDMAContext
*rdma
,
2029 int current_index
, uint64_t current_addr
,
2033 struct ibv_send_wr send_wr
= { 0 };
2034 struct ibv_send_wr
*bad_wr
;
2035 int reg_result_idx
, ret
, count
= 0;
2036 uint64_t chunk
, chunks
;
2037 uint8_t *chunk_start
, *chunk_end
;
2038 RDMALocalBlock
*block
= &(rdma
->local_ram_blocks
.block
[current_index
]);
2040 RDMARegisterResult
*reg_result
;
2041 RDMAControlHeader resp
= { .type
= RDMA_CONTROL_REGISTER_RESULT
};
2042 RDMAControlHeader head
= { .len
= sizeof(RDMARegister
),
2043 .type
= RDMA_CONTROL_REGISTER_REQUEST
                             };

retry:
    sge.addr = (uintptr_t)(block->local_host_addr +
                           (current_addr - block->offset));
    sge.length = length;

    chunk = ram_chunk_index(block->local_host_addr,
                            (uint8_t *)(uintptr_t)sge.addr);
    chunk_start = ram_chunk_start(block, chunk);

    if (block->is_ram_block) {
        chunks = length / (1UL << RDMA_REG_CHUNK_SHIFT);

        if (chunks && ((length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
            chunks--;
        }
    } else {
        chunks = block->length / (1UL << RDMA_REG_CHUNK_SHIFT);

        if (chunks && ((block->length % (1UL << RDMA_REG_CHUNK_SHIFT)) == 0)) {
            chunks--;
        }
    }

    trace_qemu_rdma_write_one_top(chunks + 1,
                                  (chunks + 1) *
                                  (1UL << RDMA_REG_CHUNK_SHIFT) / 1024 / 1024);

    chunk_end = ram_chunk_end(block, chunk + chunks);

    while (test_bit(chunk, block->transit_bitmap)) {
        trace_qemu_rdma_write_one_block(count++, current_index, chunk,
                sge.addr, length, rdma->nb_sent, block->nb_chunks);

        ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);

        if (ret < 0) {
            error_report("Failed to wait for previous write to complete "
                    "block %d chunk %" PRIu64
                    " current %" PRIu64 " len %" PRIu64 " %d",
                    current_index, chunk, sge.addr, length, rdma->nb_sent);
            return ret;
        }
    }

    if (!rdma->pin_all || !block->is_ram_block) {
        if (!block->remote_keys[chunk]) {
            /*
             * This chunk has not yet been registered, so first check to see
             * if the entire chunk is zero. If so, tell the other side to
             * memset() + madvise() the entire chunk without RDMA.
             */

            if (buffer_is_zero((void *)(uintptr_t)sge.addr, length)) {
                RDMACompress comp = {
                                        .offset = current_addr,
                                        .value = 0,
                                        .block_idx = current_index,
                                        .length = length,
                                    };

                head.len = sizeof(comp);
                head.type = RDMA_CONTROL_COMPRESS;

                trace_qemu_rdma_write_one_zero(chunk, sge.length,
                                               current_index, current_addr);

                compress_to_network(rdma, &comp);
                ret = qemu_rdma_exchange_send(rdma, &head,
                                (uint8_t *) &comp, NULL, NULL, NULL);

                if (ret < 0) {
                    return -EIO;
                }

                /*
                 * TODO: Here we are sending something, but we are not
                 * accounting for anything transferred.  The following is wrong:
                 *
                 * stat64_add(&mig_stats.rdma_bytes, sge.length);
                 *
                 * because we are using some kind of compression.  I
                 * would think that head.len would be the more similar
                 * thing to a correct value.
                 */
                stat64_add(&mig_stats.zero_pages,
                           sge.length / qemu_target_page_size());
                return 1;
            }

            /*
             * Otherwise, tell other side to register.
             */
            reg.current_index = current_index;
            if (block->is_ram_block) {
                reg.key.current_addr = current_addr;
            } else {
                reg.key.chunk = chunk;
            }
            reg.chunks = chunks;

            trace_qemu_rdma_write_one_sendreg(chunk, sge.length, current_index,
                                              current_addr);

            register_to_network(rdma, &reg);
            ret = qemu_rdma_exchange_send(rdma, &head, (uint8_t *) &reg,
                                    &resp, &reg_result_idx, NULL);
            if (ret < 0) {
                return ret;
            }

            /* try to overlap this single registration with the one we sent. */
            if (qemu_rdma_register_and_get_keys(rdma, block, sge.addr,
                                                &sge.lkey, NULL, chunk,
                                                chunk_start, chunk_end)) {
                error_report("cannot get lkey");
                return -EINVAL;
            }

            reg_result = (RDMARegisterResult *)
                    rdma->wr_data[reg_result_idx].control_curr;

            network_to_result(reg_result);

            trace_qemu_rdma_write_one_recvregres(block->remote_keys[chunk],
                                                 reg_result->rkey, chunk);

            block->remote_keys[chunk] = reg_result->rkey;
            block->remote_host_addr = reg_result->host_addr;
        } else {
            /* already registered before */
            if (qemu_rdma_register_and_get_keys(rdma, block, sge.addr,
                                                &sge.lkey, NULL, chunk,
                                                chunk_start, chunk_end)) {
                error_report("cannot get lkey!");
                return -EINVAL;
            }
        }

        send_wr.wr.rdma.rkey = block->remote_keys[chunk];
    } else {
        send_wr.wr.rdma.rkey = block->remote_rkey;

        if (qemu_rdma_register_and_get_keys(rdma, block, sge.addr,
                                            &sge.lkey, NULL, chunk,
                                            chunk_start, chunk_end)) {
            error_report("cannot get lkey!");
            return -EINVAL;
        }
    }

    /*
     * Encode the ram block index and chunk within this wrid.
     * We will use this information at the time of completion
     * to figure out which bitmap to check against and then which
     * chunk in the bitmap to look for.
     */
    send_wr.wr_id = qemu_rdma_make_wrid(RDMA_WRID_RDMA_WRITE,
                                        current_index, chunk);

    send_wr.opcode = IBV_WR_RDMA_WRITE;
    send_wr.send_flags = IBV_SEND_SIGNALED;
    send_wr.sg_list = &sge;
    send_wr.num_sge = 1;
    send_wr.wr.rdma.remote_addr = block->remote_host_addr +
                                  (current_addr - block->offset);

    trace_qemu_rdma_write_one_post(chunk, sge.addr, send_wr.wr.rdma.remote_addr,
                                   sge.length);

    /*
     * ibv_post_send() does not return negative error numbers,
     * per the specification they are positive - no idea why.
     */
    ret = ibv_post_send(rdma->qp, &send_wr, &bad_wr);

    if (ret == ENOMEM) {
        trace_qemu_rdma_write_one_queue_full();
        ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
        if (ret < 0) {
            error_report("rdma migration: failed to make "
                         "room in full send queue!");
            return ret;
        }

        goto retry;

    } else if (ret > 0) {
        /*
         * FIXME perror() is problematic, because whether
         * ibv_post_send() sets errno is unclear.  Will go away later
         * in this series.
         */
        perror("rdma migration: post rdma write failed");
        return -ret;
    }

    set_bit(chunk, block->transit_bitmap);
    stat64_add(&mig_stats.normal_pages, sge.length / qemu_target_page_size());
    /*
     * We are adding to transferred the amount of data written, but no
     * overhead at all.  I will assume that RDMA is magical and doesn't
     * need to transfer (at least) the addresses where it wants to
     * write the pages.  Here it looks like it should be something like:
     *
     *     sizeof(send_wr) + sge.length
     *
     * but this being RDMA, who knows.
     */
    stat64_add(&mig_stats.rdma_bytes, sge.length);
    ram_transferred_add(sge.length);
    rdma->total_writes++;

    return 0;
}
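/*
 * For illustration, this is how a completion consumer recovers the block
 * and chunk that a signaled RDMA write encoded into its wr_id - the same
 * decoding that qemu_rdma_poll() performs with the RDMA_WRID_*_MASK
 * definitions at the top of this file:
 *
 *     uint64_t index = (wr_id & RDMA_WRID_BLOCK_MASK) >> RDMA_WRID_BLOCK_SHIFT;
 *     uint64_t chunk = (wr_id & RDMA_WRID_CHUNK_MASK) >> RDMA_WRID_CHUNK_SHIFT;
 *     clear_bit(chunk, rdma->local_ram_blocks.block[index].transit_bitmap);
 */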
/*
 * Push out any unwritten RDMA operations.
 *
 * We support sending out multiple chunks at the same time.
 * Not all of them need to get signaled in the completion queue.
 */
static int qemu_rdma_write_flush(RDMAContext *rdma)
{
    int ret;

    if (!rdma->current_length) {
        return 0;
    }

    ret = qemu_rdma_write_one(rdma,
            rdma->current_index, rdma->current_addr, rdma->current_length);

    if (ret < 0) {
        return ret;
    }

    if (ret == 0) {
        rdma->nb_sent++;
        trace_qemu_rdma_write_flush(rdma->nb_sent);
    }

    rdma->current_length = 0;
    rdma->current_addr = 0;

    return 0;
}
static inline bool qemu_rdma_buffer_mergeable(RDMAContext *rdma,
                                              uint64_t offset, uint64_t len)
{
    RDMALocalBlock *block;
    uint8_t *host_addr;
    uint8_t *chunk_end;

    if (rdma->current_index < 0) {
        return false;
    }

    if (rdma->current_chunk < 0) {
        return false;
    }

    block = &(rdma->local_ram_blocks.block[rdma->current_index]);
    host_addr = block->local_host_addr + (offset - block->offset);
    chunk_end = ram_chunk_end(block, rdma->current_chunk);

    if (rdma->current_length == 0) {
        return false;
    }

    /*
     * Only merge into chunk sequentially.
     */
    if (offset != (rdma->current_addr + rdma->current_length)) {
        return false;
    }

    if (offset < block->offset) {
        return false;
    }

    if ((offset + len) > (block->offset + block->length)) {
        return false;
    }

    if ((host_addr + len) > chunk_end) {
        return false;
    }

    return true;
}
/*
 * We're not actually writing here, but doing three things:
 *
 * 1. Identify the chunk the buffer belongs to.
 * 2. If the chunk is full, or the buffer doesn't belong to the current
 *    chunk, then start a new chunk and flush() the old chunk.
 * 3. To keep the hardware busy, we also group chunks into batches
 *    and only require that a batch gets acknowledged in the completion
 *    queue instead of each individual chunk.
 */
static int qemu_rdma_write(RDMAContext *rdma,
                           uint64_t block_offset, uint64_t offset,
                           uint64_t len)
{
    uint64_t current_addr = block_offset + offset;
    uint64_t index = rdma->current_index;
    uint64_t chunk = rdma->current_chunk;
    int ret;

    /* If we cannot merge it, we flush the current buffer first. */
    if (!qemu_rdma_buffer_mergeable(rdma, current_addr, len)) {
        ret = qemu_rdma_write_flush(rdma);
        if (ret) {
            return ret;
        }
        rdma->current_length = 0;
        rdma->current_addr = current_addr;

        qemu_rdma_search_ram_block(rdma, block_offset,
                                   offset, len, &index, &chunk);
        rdma->current_index = index;
        rdma->current_chunk = chunk;
    }

    /* merge it */
    rdma->current_length += len;

    /* flush it if buffer is too large */
    if (rdma->current_length >= RDMA_MERGE_MAX) {
        return qemu_rdma_write_flush(rdma);
    }

    return 0;
}
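/*
 * Merging example, assuming 4 KiB pages and the 1 MB chunks set by
 * RDMA_REG_CHUNK_SHIFT: three sequential calls covering offsets 0x0000,
 * 0x1000 and 0x2000 of the same chunk coalesce into one pending 12 KiB
 * write; a fourth call with a non-sequential offset (or one landing in
 * another chunk) first flushes that pending write and then starts a new
 * buffer for the new chunk.
 */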
static void qemu_rdma_cleanup(RDMAContext *rdma)
{
    int idx;

    if (rdma->cm_id && rdma->connected) {
        if ((rdma->error_state ||
             migrate_get_current()->state == MIGRATION_STATUS_CANCELLING) &&
            !rdma->received_error) {
            RDMAControlHeader head = { .len = 0,
                                       .type = RDMA_CONTROL_ERROR,
                                       .repeat = 1,
                                     };
            error_report("Early error. Sending error.");
            qemu_rdma_post_send_control(rdma, NULL, &head);
        }

        rdma_disconnect(rdma->cm_id);
        trace_qemu_rdma_cleanup_disconnect();
        rdma->connected = false;
    }

    if (rdma->channel) {
        qemu_set_fd_handler(rdma->channel->fd, NULL, NULL, NULL);
    }
    g_free(rdma->dest_blocks);
    rdma->dest_blocks = NULL;

    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
        if (rdma->wr_data[idx].control_mr) {
            rdma->total_registrations--;
            ibv_dereg_mr(rdma->wr_data[idx].control_mr);
        }
        rdma->wr_data[idx].control_mr = NULL;
    }

    if (rdma->local_ram_blocks.block) {
        while (rdma->local_ram_blocks.nb_blocks) {
            rdma_delete_block(rdma, &rdma->local_ram_blocks.block[0]);
        }
    }

    if (rdma->qp) {
        rdma_destroy_qp(rdma->cm_id);
        rdma->qp = NULL;
    }
    if (rdma->recv_cq) {
        ibv_destroy_cq(rdma->recv_cq);
        rdma->recv_cq = NULL;
    }
    if (rdma->send_cq) {
        ibv_destroy_cq(rdma->send_cq);
        rdma->send_cq = NULL;
    }
    if (rdma->recv_comp_channel) {
        ibv_destroy_comp_channel(rdma->recv_comp_channel);
        rdma->recv_comp_channel = NULL;
    }
    if (rdma->send_comp_channel) {
        ibv_destroy_comp_channel(rdma->send_comp_channel);
        rdma->send_comp_channel = NULL;
    }
    if (rdma->pd) {
        ibv_dealloc_pd(rdma->pd);
        rdma->pd = NULL;
    }
    if (rdma->cm_id) {
        rdma_destroy_id(rdma->cm_id);
        rdma->cm_id = NULL;
    }

    /* the destination side, listen_id and channel is shared */
    if (rdma->listen_id) {
        if (!rdma->is_return_path) {
            rdma_destroy_id(rdma->listen_id);
        }
        rdma->listen_id = NULL;

        if (rdma->channel) {
            if (!rdma->is_return_path) {
                rdma_destroy_event_channel(rdma->channel);
            }
            rdma->channel = NULL;
        }
    }

    if (rdma->channel) {
        rdma_destroy_event_channel(rdma->channel);
        rdma->channel = NULL;
    }
    g_free(rdma->host);
    g_free(rdma->host_port);
    rdma->host = NULL;
    rdma->host_port = NULL;
}
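/*
 * Teardown order above is deliberate: control MRs and the QP are released
 * before their completion queues and channels, which in turn go before the
 * protection domain and cm_id; the shared listen_id and event channel are
 * only destroyed on the primary connection, never by the return path that
 * borrowed them.
 */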
static int qemu_rdma_source_init(RDMAContext *rdma, bool pin_all, Error **errp)
{
    int ret, idx;

    /*
     * Will be validated against destination's actual capabilities
     * after the connect() completes.
     */
    rdma->pin_all = pin_all;

    ret = qemu_rdma_resolve_host(rdma, errp);
    if (ret) {
        goto err_rdma_source_init;
    }

    ret = qemu_rdma_alloc_pd_cq(rdma);
    if (ret) {
        ERROR(errp, "rdma migration: error allocating pd and cq! Your mlock()"
                    " limits may be too low. Please check $ ulimit -a # and "
                    "search for 'ulimit -l' in the output");
        goto err_rdma_source_init;
    }

    ret = qemu_rdma_alloc_qp(rdma);
    if (ret) {
        ERROR(errp, "rdma migration: error allocating qp!");
        goto err_rdma_source_init;
    }

    qemu_rdma_init_ram_blocks(rdma);

    /* Build the hash that maps from offset to RAMBlock */
    rdma->blockmap = g_hash_table_new(g_direct_hash, g_direct_equal);
    for (idx = 0; idx < rdma->local_ram_blocks.nb_blocks; idx++) {
        g_hash_table_insert(rdma->blockmap,
                (void *)(uintptr_t)rdma->local_ram_blocks.block[idx].offset,
                &rdma->local_ram_blocks.block[idx]);
    }

    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
        ret = qemu_rdma_reg_control(rdma, idx);
        if (ret) {
            ERROR(errp, "rdma migration: error registering %d control!",
                        idx);
            goto err_rdma_source_init;
        }
    }

    return 0;

err_rdma_source_init:
    qemu_rdma_cleanup(rdma);
    return -1;
}
static int qemu_get_cm_event_timeout(RDMAContext *rdma,
                                     struct rdma_cm_event **cm_event,
                                     long msec, Error **errp)
{
    int ret;
    struct pollfd poll_fd = {
                                .fd = rdma->channel->fd,
                                .events = POLLIN,
                                .revents = 0
                            };

    do {
        ret = poll(&poll_fd, 1, msec);
    } while (ret < 0 && errno == EINTR);

    if (ret == 0) {
        ERROR(errp, "poll cm event timeout");
        return -1;
    } else if (ret < 0) {
        ERROR(errp, "failed to poll cm event, errno=%i", errno);
        return -1;
    } else if (poll_fd.revents & POLLIN) {
        return rdma_get_cm_event(rdma->channel, cm_event);
    } else {
        ERROR(errp, "no POLLIN event, revent=%x", poll_fd.revents);
        return -1;
    }
}
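/*
 * The EINTR loop matters: without it, a stray signal during connection
 * setup would be misreported as a failed or timed-out CM event. A typical
 * caller (see qemu_rdma_connect() below) waits up to five seconds for the
 * ESTABLISHED event:
 *
 *     struct rdma_cm_event *cm_event;
 *     ret = qemu_get_cm_event_timeout(rdma, &cm_event, 5000, errp);
 */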
static int qemu_rdma_connect(RDMAContext *rdma, bool return_path,
                             Error **errp)
{
    RDMACapabilities cap = {
                                .version = RDMA_CONTROL_VERSION_CURRENT,
                                .flags = 0,
                           };
    struct rdma_conn_param conn_param = { .initiator_depth = 2,
                                          .retry_count = 5,
                                          .private_data = &cap,
                                          .private_data_len = sizeof(cap),
                                        };
    struct rdma_cm_event *cm_event;
    int ret;

    /*
     * Only negotiate the capability with destination if the user
     * on the source first requested the capability.
     */
    if (rdma->pin_all) {
        trace_qemu_rdma_connect_pin_all_requested();
        cap.flags |= RDMA_CAPABILITY_PIN_ALL;
    }

    caps_to_network(&cap);

    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
    if (ret) {
        ERROR(errp, "posting second control recv");
        goto err_rdma_source_connect;
    }

    ret = rdma_connect(rdma->cm_id, &conn_param);
    if (ret) {
        perror("rdma_connect");
        ERROR(errp, "connecting to destination!");
        goto err_rdma_source_connect;
    }

    if (return_path) {
        ret = qemu_get_cm_event_timeout(rdma, &cm_event, 5000, errp);
    } else {
        ret = rdma_get_cm_event(rdma->channel, &cm_event);
    }
    if (ret) {
        /*
         * FIXME perror() is wrong, because
         * qemu_get_cm_event_timeout() can fail without setting errno.
         * Will go away later in this series.
         */
        perror("rdma_get_cm_event after rdma_connect");
        ERROR(errp, "connecting to destination!");
        goto err_rdma_source_connect;
    }

    if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
        error_report("rdma_get_cm_event != EVENT_ESTABLISHED after rdma_connect");
        ERROR(errp, "connecting to destination!");
        rdma_ack_cm_event(cm_event);
        goto err_rdma_source_connect;
    }
    rdma->connected = true;

    memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));
    network_to_caps(&cap);

    /*
     * Verify that the *requested* capabilities are supported by the destination
     * and disable them otherwise.
     */
    if (rdma->pin_all && !(cap.flags & RDMA_CAPABILITY_PIN_ALL)) {
        ERROR(errp, "Server cannot support pinning all memory. "
                    "Will register memory dynamically.");
        rdma->pin_all = false;
    }

    trace_qemu_rdma_connect_pin_all_outcome(rdma->pin_all);

    rdma_ack_cm_event(cm_event);

    rdma->control_ready_expected = 1;
    rdma->nb_sent = 0;
    return 0;

err_rdma_source_connect:
    qemu_rdma_cleanup(rdma);
    return -1;
}
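/*
 * The capability negotiation above is advertise-then-verify: the source
 * only sets RDMA_CAPABILITY_PIN_ALL in the private_data it sends with
 * rdma_connect(), and falls back to dynamic chunk registration (with a
 * warning) when the destination's reply does not echo the flag back.
 * New capabilities are expected to follow the same pattern.
 */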
static int qemu_rdma_dest_init(RDMAContext *rdma, Error **errp)
{
    int ret, idx;
    struct rdma_cm_id *listen_id;
    char ip[40] = "unknown";
    struct rdma_addrinfo *res, *e;
    char port_str[16];
    int reuse = 1;

    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
        rdma->wr_data[idx].control_len = 0;
        rdma->wr_data[idx].control_curr = NULL;
    }

    if (!rdma->host || !rdma->host[0]) {
        ERROR(errp, "RDMA host is not set!");
        rdma->error_state = -EINVAL;
        return -1;
    }
    /* create CM channel */
    rdma->channel = rdma_create_event_channel();
    if (!rdma->channel) {
        ERROR(errp, "could not create rdma event channel");
        rdma->error_state = -EINVAL;
        return -1;
    }

    /* create CM id */
    ret = rdma_create_id(rdma->channel, &listen_id, NULL, RDMA_PS_TCP);
    if (ret) {
        ERROR(errp, "could not create cm_id!");
        goto err_dest_init_create_listen_id;
    }

    snprintf(port_str, 16, "%d", rdma->port);
    port_str[15] = '\0';

    ret = rdma_getaddrinfo(rdma->host, port_str, NULL, &res);
    if (ret < 0) {
        ERROR(errp, "could not rdma_getaddrinfo address %s", rdma->host);
        goto err_dest_init_bind_addr;
    }

    ret = rdma_set_option(listen_id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR,
                          &reuse, sizeof reuse);
    if (ret) {
        ERROR(errp, "Error: could not set REUSEADDR option");
        goto err_dest_init_bind_addr;
    }
    for (e = res; e != NULL; e = e->ai_next) {
        inet_ntop(e->ai_family,
            &((struct sockaddr_in *) e->ai_dst_addr)->sin_addr, ip, sizeof ip);
        trace_qemu_rdma_dest_init_trying(rdma->host, ip);
        ret = rdma_bind_addr(listen_id, e->ai_dst_addr);
        if (ret) {
            continue;
        }
        if (e->ai_family == AF_INET6) {
            ret = qemu_rdma_broken_ipv6_kernel(listen_id->verbs, errp);
            if (ret) {
                continue;
            }
        }
        break;
    }

    rdma_freeaddrinfo(res);
    if (!e) {
        ERROR(errp, "Error: could not rdma_bind_addr!");
        goto err_dest_init_bind_addr;
    }

    rdma->listen_id = listen_id;
    qemu_rdma_dump_gid("dest_init", listen_id);
    return 0;

err_dest_init_bind_addr:
    rdma_destroy_id(listen_id);
err_dest_init_create_listen_id:
    rdma_destroy_event_channel(rdma->channel);
    rdma->channel = NULL;
    rdma->error_state = ret;
    return ret;
}
static void qemu_rdma_return_path_dest_init(RDMAContext *rdma_return_path,
                                            RDMAContext *rdma)
{
    int idx;

    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
        rdma_return_path->wr_data[idx].control_len = 0;
        rdma_return_path->wr_data[idx].control_curr = NULL;
    }

    /* the CM channel and CM id are shared */
    rdma_return_path->channel = rdma->channel;
    rdma_return_path->listen_id = rdma->listen_id;

    rdma->return_path = rdma_return_path;
    rdma_return_path->return_path = rdma;
    rdma_return_path->is_return_path = true;
}
static RDMAContext *qemu_rdma_data_init(const char *host_port, Error **errp)
{
    RDMAContext *rdma = NULL;
    InetSocketAddress *addr;

    rdma = g_new0(RDMAContext, 1);
    rdma->current_index = -1;
    rdma->current_chunk = -1;

    addr = g_new(InetSocketAddress, 1);
    if (!inet_parse(addr, host_port, NULL)) {
        rdma->port = atoi(addr->port);
        rdma->host = g_strdup(addr->host);
        rdma->host_port = g_strdup(host_port);
    } else {
        ERROR(errp, "bad RDMA migration address '%s'", host_port);
        g_free(rdma);
        rdma = NULL;
    }

    qapi_free_InetSocketAddress(addr);
    return rdma;
}
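/*
 * host_port is parsed with inet_parse(), so it takes the usual
 * "host:port" form, e.g. "1.2.3.4:4444", with IPv6 addresses in
 * brackets ("[::1]:4444").
 */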
/*
 * QEMUFile interface to the control channel.
 * SEND messages for control only.
 * VM's ram is handled with regular RDMA messages.
 */
static ssize_t qio_channel_rdma_writev(QIOChannel *ioc,
                                       const struct iovec *iov,
                                       size_t niov,
                                       int *fds,
                                       size_t nfds,
                                       int flags,
                                       Error **errp)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
    RDMAContext *rdma;
    int ret;
    ssize_t done = 0;
    size_t i, len;

    RCU_READ_LOCK_GUARD();
    rdma = qatomic_rcu_read(&rioc->rdmaout);

    if (!rdma) {
        error_setg(errp, "RDMA control channel output is not set");
        return -1;
    }

    if (rdma->error_state) {
        error_setg(errp,
                   "RDMA is in an error state waiting migration to abort!");
        return -1;
    }

    /*
     * Push out any writes that
     * we've queued up for VM's ram.
     */
    ret = qemu_rdma_write_flush(rdma);
    if (ret < 0) {
        rdma->error_state = ret;
        error_setg(errp, "qemu_rdma_write_flush failed");
        return -1;
    }

    for (i = 0; i < niov; i++) {
        size_t remaining = iov[i].iov_len;
        uint8_t * data = (void *)iov[i].iov_base;
        while (remaining) {
            RDMAControlHeader head = {};

            len = MIN(remaining, RDMA_SEND_INCREMENT);
            remaining -= len;

            head.len = len;
            head.type = RDMA_CONTROL_QEMU_FILE;

            ret = qemu_rdma_exchange_send(rdma, &head, data, NULL, NULL, NULL);

            if (ret < 0) {
                rdma->error_state = ret;
                error_setg(errp, "qemu_rdma_exchange_send failed");
                return -1;
            }

            data += len;
            done += len;
        }
    }

    return done;
}
static size_t qemu_rdma_fill(RDMAContext *rdma, uint8_t *buf,
                             size_t size, int idx)
{
    size_t len = 0;

    if (rdma->wr_data[idx].control_len) {
        trace_qemu_rdma_fill(rdma->wr_data[idx].control_len, size);

        len = MIN(size, rdma->wr_data[idx].control_len);
        memcpy(buf, rdma->wr_data[idx].control_curr, len);
        rdma->wr_data[idx].control_curr += len;
        rdma->wr_data[idx].control_len -= len;
    }

    return len;
}
/*
 * QEMUFile interface to the control channel.
 * RDMA links don't use bytestreams, so we have to
 * return bytes to QEMUFile opportunistically.
 */
static ssize_t qio_channel_rdma_readv(QIOChannel *ioc,
                                      const struct iovec *iov,
                                      size_t niov,
                                      int **fds,
                                      size_t *nfds,
                                      int flags,
                                      Error **errp)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
    RDMAContext *rdma;
    RDMAControlHeader head;
    int ret = 0;
    ssize_t done = 0;
    size_t i, len;

    RCU_READ_LOCK_GUARD();
    rdma = qatomic_rcu_read(&rioc->rdmain);

    if (!rdma) {
        error_setg(errp, "RDMA control channel input is not set");
        return -1;
    }

    if (rdma->error_state) {
        error_setg(errp,
                   "RDMA is in an error state waiting migration to abort!");
        return -1;
    }

    for (i = 0; i < niov; i++) {
        size_t want = iov[i].iov_len;
        uint8_t *data = (void *)iov[i].iov_base;

        /*
         * First, we hold on to the last SEND message we
         * were given and dish out the bytes until we run
         * out of bytes.
         */
        len = qemu_rdma_fill(rdma, data, want, 0);
        done += len;
        want -= len;
        /* Got what we needed, so go to next iovec */
        if (want == 0) {
            continue;
        }

        /* If we got any data so far, then don't wait
         * for more, just return what we have */
        if (done > 0) {
            break;
        }

        /* We've got nothing at all, so let's wait for
         * more to arrive
         */
        ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_QEMU_FILE);

        if (ret < 0) {
            rdma->error_state = ret;
            error_setg(errp, "qemu_rdma_exchange_recv failed");
            return -1;
        }

        /*
         * SEND was received with new bytes, now try again.
         */
        len = qemu_rdma_fill(rdma, data, want, 0);
        done += len;
        want -= len;

        /* Still didn't get enough, so let's just return */
        if (want) {
            if (done == 0) {
                return QIO_CHANNEL_ERR_BLOCK;
            } else {
                break;
            }
        }
    }
    return done;
}
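/*
 * Note the short-read semantics above: once any bytes have been handed
 * out, the loop returns what it has rather than blocking for the rest of
 * the iovec; QIO_CHANNEL_ERR_BLOCK is only returned when even a freshly
 * received control message produced nothing for the caller.
 */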
/*
 * Block until all the outstanding chunks have been delivered by the hardware.
 */
static int qemu_rdma_drain_cq(RDMAContext *rdma)
{
    int ret;

    if (qemu_rdma_write_flush(rdma) < 0) {
        return -EIO;
    }

    while (rdma->nb_sent) {
        ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RDMA_WRITE, NULL);
        if (ret < 0) {
            error_report("rdma migration: complete polling error!");
            return -EIO;
        }
    }

    qemu_rdma_unregister_waiting(rdma);

    return 0;
}
static int qio_channel_rdma_set_blocking(QIOChannel *ioc,
                                         bool blocking,
                                         Error **errp)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
    /* XXX we should make readv/writev actually honour this :-) */
    rioc->blocking = blocking;
    return 0;
}
typedef struct QIOChannelRDMASource QIOChannelRDMASource;
struct QIOChannelRDMASource {
    GSource parent;
    QIOChannelRDMA *rioc;
    GIOCondition condition;
};

static gboolean
qio_channel_rdma_source_prepare(GSource *source,
                                gint *timeout)
{
    QIOChannelRDMASource *rsource = (QIOChannelRDMASource *)source;
    RDMAContext *rdma;
    GIOCondition cond = 0;
    *timeout = -1;

    RCU_READ_LOCK_GUARD();
    if (rsource->condition == G_IO_IN) {
        rdma = qatomic_rcu_read(&rsource->rioc->rdmain);
    } else {
        rdma = qatomic_rcu_read(&rsource->rioc->rdmaout);
    }

    if (!rdma) {
        error_report("RDMAContext is NULL when prepare Gsource");
        return FALSE;
    }

    if (rdma->wr_data[0].control_len) {
        cond |= G_IO_IN;
    }
    cond |= G_IO_OUT;

    return cond & rsource->condition;
}

static gboolean
qio_channel_rdma_source_check(GSource *source)
{
    QIOChannelRDMASource *rsource = (QIOChannelRDMASource *)source;
    RDMAContext *rdma;
    GIOCondition cond = 0;

    RCU_READ_LOCK_GUARD();
    if (rsource->condition == G_IO_IN) {
        rdma = qatomic_rcu_read(&rsource->rioc->rdmain);
    } else {
        rdma = qatomic_rcu_read(&rsource->rioc->rdmaout);
    }

    if (!rdma) {
        error_report("RDMAContext is NULL when check Gsource");
        return FALSE;
    }

    if (rdma->wr_data[0].control_len) {
        cond |= G_IO_IN;
    }
    cond |= G_IO_OUT;

    return cond & rsource->condition;
}

static gboolean
qio_channel_rdma_source_dispatch(GSource *source,
                                 GSourceFunc callback,
                                 gpointer user_data)
{
    QIOChannelFunc func = (QIOChannelFunc)callback;
    QIOChannelRDMASource *rsource = (QIOChannelRDMASource *)source;
    RDMAContext *rdma;
    GIOCondition cond = 0;

    RCU_READ_LOCK_GUARD();
    if (rsource->condition == G_IO_IN) {
        rdma = qatomic_rcu_read(&rsource->rioc->rdmain);
    } else {
        rdma = qatomic_rcu_read(&rsource->rioc->rdmaout);
    }

    if (!rdma) {
        error_report("RDMAContext is NULL when dispatch Gsource");
        return FALSE;
    }

    if (rdma->wr_data[0].control_len) {
        cond |= G_IO_IN;
    }
    cond |= G_IO_OUT;

    return (*func)(QIO_CHANNEL(rsource->rioc),
                   (cond & rsource->condition),
                   user_data);
}

static void
qio_channel_rdma_source_finalize(GSource *source)
{
    QIOChannelRDMASource *ssource = (QIOChannelRDMASource *)source;

    object_unref(OBJECT(ssource->rioc));
}

static GSourceFuncs qio_channel_rdma_source_funcs = {
    qio_channel_rdma_source_prepare,
    qio_channel_rdma_source_check,
    qio_channel_rdma_source_dispatch,
    qio_channel_rdma_source_finalize
};

static GSource *qio_channel_rdma_create_watch(QIOChannel *ioc,
                                              GIOCondition condition)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
    QIOChannelRDMASource *ssource;
    GSource *source;

    source = g_source_new(&qio_channel_rdma_source_funcs,
                          sizeof(QIOChannelRDMASource));
    ssource = (QIOChannelRDMASource *)source;

    ssource->rioc = rioc;
    object_ref(OBJECT(rioc));

    ssource->condition = condition;

    return source;
}
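/*
 * The prepare/check/dispatch callbacks above follow the standard GSource
 * pattern: G_IO_OUT is always reported ready (control SENDs never queue
 * locally), while G_IO_IN only fires once a received control message is
 * buffered in wr_data[0].
 */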
static void qio_channel_rdma_set_aio_fd_handler(QIOChannel *ioc,
                                                AioContext *read_ctx,
                                                IOHandler *io_read,
                                                AioContext *write_ctx,
                                                IOHandler *io_write,
                                                void *opaque)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
    if (io_read) {
        aio_set_fd_handler(read_ctx, rioc->rdmain->recv_comp_channel->fd,
                           io_read, io_write, NULL, NULL, opaque);
        aio_set_fd_handler(read_ctx, rioc->rdmain->send_comp_channel->fd,
                           io_read, io_write, NULL, NULL, opaque);
    } else {
        aio_set_fd_handler(write_ctx, rioc->rdmaout->recv_comp_channel->fd,
                           io_read, io_write, NULL, NULL, opaque);
        aio_set_fd_handler(write_ctx, rioc->rdmaout->send_comp_channel->fd,
                           io_read, io_write, NULL, NULL, opaque);
    }
}
struct rdma_close_rcu {
    struct rcu_head rcu;
    RDMAContext *rdmain;
    RDMAContext *rdmaout;
};

/* callback from qio_channel_rdma_close via call_rcu */
static void qio_channel_rdma_close_rcu(struct rdma_close_rcu *rcu)
{
    if (rcu->rdmain) {
        qemu_rdma_cleanup(rcu->rdmain);
    }

    if (rcu->rdmaout) {
        qemu_rdma_cleanup(rcu->rdmaout);
    }

    g_free(rcu->rdmain);
    g_free(rcu->rdmaout);
    g_free(rcu);
}

static int qio_channel_rdma_close(QIOChannel *ioc,
                                  Error **errp)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
    RDMAContext *rdmain, *rdmaout;
    struct rdma_close_rcu *rcu = g_new(struct rdma_close_rcu, 1);

    trace_qemu_rdma_close();

    rdmain = rioc->rdmain;
    if (rdmain) {
        qatomic_rcu_set(&rioc->rdmain, NULL);
    }

    rdmaout = rioc->rdmaout;
    if (rdmaout) {
        qatomic_rcu_set(&rioc->rdmaout, NULL);
    }

    rcu->rdmain = rdmain;
    rcu->rdmaout = rdmaout;
    call_rcu(rcu, qio_channel_rdma_close_rcu, rcu);

    return 0;
}
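/*
 * close() only unpublishes the pointers; the actual qemu_rdma_cleanup()
 * and g_free() run from an RCU callback, so any reader that already
 * fetched rdmain/rdmaout under RCU_READ_LOCK_GUARD() can finish before
 * the contexts disappear.
 */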
static int
qio_channel_rdma_shutdown(QIOChannel *ioc,
                          QIOChannelShutdown how,
                          Error **errp)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(ioc);
    RDMAContext *rdmain, *rdmaout;

    RCU_READ_LOCK_GUARD();

    rdmain = qatomic_rcu_read(&rioc->rdmain);
    rdmaout = qatomic_rcu_read(&rioc->rdmaout);

    switch (how) {
    case QIO_CHANNEL_SHUTDOWN_READ:
        if (rdmain) {
            rdmain->error_state = -1;
        }
        break;
    case QIO_CHANNEL_SHUTDOWN_WRITE:
        if (rdmaout) {
            rdmaout->error_state = -1;
        }
        break;
    case QIO_CHANNEL_SHUTDOWN_BOTH:
    default:
        if (rdmain) {
            rdmain->error_state = -1;
        }
        if (rdmaout) {
            rdmaout->error_state = -1;
        }
        break;
    }

    return 0;
}
/*
 * Parameters:
 *    @offset == 0 :
 *        This means that 'block_offset' is a full virtual address that does not
 *        belong to a RAMBlock of the virtual machine and instead
 *        represents a private malloc'd memory area that the caller wishes to
 *        transfer.
 *
 *    @offset != 0 :
 *        Offset is an offset to be added to block_offset and used
 *        to also lookup the corresponding RAMBlock.
 *
 *    @size : Number of bytes to transfer
 *
 *    @pages_sent : User-specified pointer to indicate how many pages were
 *                  sent. Usually, this will not be more than a few bytes of
 *                  the protocol because most transfers are sent asynchronously.
 */
static int qemu_rdma_save_page(QEMUFile *f, ram_addr_t block_offset,
                               ram_addr_t offset, size_t size)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f));
    RDMAContext *rdma;
    int ret;

    if (migration_in_postcopy()) {
        return RAM_SAVE_CONTROL_NOT_SUPP;
    }

    RCU_READ_LOCK_GUARD();
    rdma = qatomic_rcu_read(&rioc->rdmaout);

    if (!rdma) {
        return -EIO;
    }

    CHECK_ERROR_STATE();

    qemu_fflush(f);

    /*
     * Add this page to the current 'chunk'. If the chunk
     * is full, or the page doesn't belong to the current chunk,
     * an actual RDMA write will occur and a new chunk will be formed.
     */
    ret = qemu_rdma_write(rdma, block_offset, offset, size);
    if (ret < 0) {
        error_report("rdma migration: write error");
        goto err;
    }

    /*
     * Drain the Completion Queue if possible, but do not block,
     * just poll.
     *
     * If nothing to poll, the end of the iteration will do this
     * again to make sure we don't overflow the request queue.
     */
    while (1) {
        uint64_t wr_id, wr_id_in;
        ret = qemu_rdma_poll(rdma, rdma->recv_cq, &wr_id_in, NULL);
        if (ret < 0) {
            error_report("rdma migration: polling error");
            goto err;
        }

        wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;

        if (wr_id == RDMA_WRID_NONE) {
            break;
        }
    }

    while (1) {
        uint64_t wr_id, wr_id_in;
        ret = qemu_rdma_poll(rdma, rdma->send_cq, &wr_id_in, NULL);
        if (ret < 0) {
            error_report("rdma migration: polling error");
            goto err;
        }

        wr_id = wr_id_in & RDMA_WRID_TYPE_MASK;

        if (wr_id == RDMA_WRID_NONE) {
            break;
        }
    }

    return RAM_SAVE_CONTROL_DELAYED;
err:
    rdma->error_state = ret;
    return ret;
}
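/*
 * RAM_SAVE_CONTROL_DELAYED tells the RAM layer the page was only queued,
 * not yet transferred; delivery is observed later, either by the
 * non-blocking polling loops above or by the full drain in
 * qemu_rdma_registration_stop() at the end of the iteration.
 */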
static void rdma_accept_incoming_migration(void *opaque);

static void rdma_cm_poll_handler(void *opaque)
{
    RDMAContext *rdma = opaque;
    int ret;
    struct rdma_cm_event *cm_event;
    MigrationIncomingState *mis = migration_incoming_get_current();

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        error_report("get_cm_event failed %d", errno);
        return;
    }

    if (cm_event->event == RDMA_CM_EVENT_DISCONNECTED ||
        cm_event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) {
        if (!rdma->error_state &&
            migration_incoming_get_current()->state !=
              MIGRATION_STATUS_COMPLETED) {
            error_report("receive cm event, cm event is %d", cm_event->event);
            rdma->error_state = -EPIPE;
            if (rdma->return_path) {
                rdma->return_path->error_state = -EPIPE;
            }
        }
        rdma_ack_cm_event(cm_event);
        if (mis->loadvm_co) {
            qemu_coroutine_enter(mis->loadvm_co);
        }
        return;
    }
    rdma_ack_cm_event(cm_event);
}
static int qemu_rdma_accept(RDMAContext *rdma)
{
    RDMACapabilities cap;
    struct rdma_conn_param conn_param = {
                                            .responder_resources = 2,
                                            .private_data = &cap,
                                            .private_data_len = sizeof(cap),
                                         };
    RDMAContext *rdma_return_path = NULL;
    struct rdma_cm_event *cm_event;
    struct ibv_context *verbs;
    int ret = -EINVAL;
    int idx;

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        goto err_rdma_dest_wait;
    }

    if (cm_event->event != RDMA_CM_EVENT_CONNECT_REQUEST) {
        rdma_ack_cm_event(cm_event);
        goto err_rdma_dest_wait;
    }

    /*
     * initialize the RDMAContext for return path for postcopy after first
     * connection request reached.
     */
    if ((migrate_postcopy() || migrate_return_path())
        && !rdma->is_return_path) {
        rdma_return_path = qemu_rdma_data_init(rdma->host_port, NULL);
        if (rdma_return_path == NULL) {
            rdma_ack_cm_event(cm_event);
            goto err_rdma_dest_wait;
        }

        qemu_rdma_return_path_dest_init(rdma_return_path, rdma);
    }

    memcpy(&cap, cm_event->param.conn.private_data, sizeof(cap));

    network_to_caps(&cap);

    if (cap.version < 1 || cap.version > RDMA_CONTROL_VERSION_CURRENT) {
        error_report("Unknown source RDMA version: %d, bailing...",
                     cap.version);
        rdma_ack_cm_event(cm_event);
        goto err_rdma_dest_wait;
    }

    /*
     * Respond with only the capabilities this version of QEMU knows about.
     */
    cap.flags &= known_capabilities;

    /*
     * Enable the ones that we do know about.
     * Add other checks here as new ones are introduced.
     */
    if (cap.flags & RDMA_CAPABILITY_PIN_ALL) {
        rdma->pin_all = true;
    }

    rdma->cm_id = cm_event->id;
    verbs = cm_event->id->verbs;

    rdma_ack_cm_event(cm_event);

    trace_qemu_rdma_accept_pin_state(rdma->pin_all);

    caps_to_network(&cap);

    trace_qemu_rdma_accept_pin_verbsc(verbs);

    if (!rdma->verbs) {
        rdma->verbs = verbs;
    } else if (rdma->verbs != verbs) {
        error_report("ibv context not matching %p, %p!", rdma->verbs,
                     verbs);
        goto err_rdma_dest_wait;
    }

    qemu_rdma_dump_id("dest_init", verbs);

    ret = qemu_rdma_alloc_pd_cq(rdma);
    if (ret) {
        error_report("rdma migration: error allocating pd and cq!");
        goto err_rdma_dest_wait;
    }

    ret = qemu_rdma_alloc_qp(rdma);
    if (ret) {
        error_report("rdma migration: error allocating qp!");
        goto err_rdma_dest_wait;
    }

    qemu_rdma_init_ram_blocks(rdma);

    for (idx = 0; idx < RDMA_WRID_MAX; idx++) {
        ret = qemu_rdma_reg_control(rdma, idx);
        if (ret) {
            error_report("rdma: error registering %d control", idx);
            goto err_rdma_dest_wait;
        }
    }

    /* Accept the second connection request for return path */
    if ((migrate_postcopy() || migrate_return_path())
        && !rdma->is_return_path) {
        qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration,
                            NULL,
                            (void *)(intptr_t)rdma->return_path);
    } else {
        qemu_set_fd_handler(rdma->channel->fd, rdma_cm_poll_handler,
                            NULL, rdma);
    }

    ret = rdma_accept(rdma->cm_id, &conn_param);
    if (ret) {
        error_report("rdma_accept failed");
        goto err_rdma_dest_wait;
    }

    ret = rdma_get_cm_event(rdma->channel, &cm_event);
    if (ret) {
        error_report("rdma_accept get_cm_event failed");
        goto err_rdma_dest_wait;
    }

    if (cm_event->event != RDMA_CM_EVENT_ESTABLISHED) {
        error_report("rdma_accept not event established");
        rdma_ack_cm_event(cm_event);
        goto err_rdma_dest_wait;
    }

    rdma_ack_cm_event(cm_event);
    rdma->connected = true;

    ret = qemu_rdma_post_recv_control(rdma, RDMA_WRID_READY);
    if (ret) {
        error_report("rdma migration: error posting second control recv");
        goto err_rdma_dest_wait;
    }

    qemu_rdma_dump_gid("dest_connect", rdma->cm_id);

    return 0;

err_rdma_dest_wait:
    rdma->error_state = ret;
    qemu_rdma_cleanup(rdma);
    g_free(rdma_return_path);
    return ret;
}
static int dest_ram_sort_func(const void *a, const void *b)
{
    unsigned int a_index = ((const RDMALocalBlock *)a)->src_index;
    unsigned int b_index = ((const RDMALocalBlock *)b)->src_index;

    return (a_index < b_index) ? -1 : (a_index != b_index);
}
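/*
 * The comparator collapses to -1/0/1 as qsort() expects: for src_index
 * values 3 and 5 it returns -1, for 5 and 3 it returns 1, and 0 only
 * when the indices are equal.
 */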
/*
 * During each iteration of the migration, we listen for instructions
 * by the source VM to perform dynamic page registrations before they
 * can perform RDMA operations.
 *
 * We respond with the 'rkey'.
 *
 * Keep doing this until the source tells us to stop.
 */
static int qemu_rdma_registration_handle(QEMUFile *f)
{
    RDMAControlHeader reg_resp = { .len = sizeof(RDMARegisterResult),
                                   .type = RDMA_CONTROL_REGISTER_RESULT,
                                   .repeat = 0,
                                 };
    RDMAControlHeader unreg_resp = { .len = 0,
                                     .type = RDMA_CONTROL_UNREGISTER_FINISHED,
                                     .repeat = 0,
                                   };
    RDMAControlHeader blocks = { .type = RDMA_CONTROL_RAM_BLOCKS_RESULT,
                                 .repeat = 1 };
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f));
    RDMAContext *rdma;
    RDMALocalBlocks *local;
    RDMAControlHeader head;
    RDMARegister *reg, *registers;
    RDMACompress *comp;
    RDMARegisterResult *reg_result;
    static RDMARegisterResult results[RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE];
    RDMALocalBlock *block;
    void *host_addr;
    int ret = 0;
    int idx = 0;
    int count = 0;
    int i = 0;

    RCU_READ_LOCK_GUARD();
    rdma = qatomic_rcu_read(&rioc->rdmain);

    if (!rdma) {
        return -EIO;
    }

    CHECK_ERROR_STATE();

    local = &rdma->local_ram_blocks;
    do {
        trace_qemu_rdma_registration_handle_wait();

        ret = qemu_rdma_exchange_recv(rdma, &head, RDMA_CONTROL_NONE);

        if (ret < 0) {
            break;
        }

        if (head.repeat > RDMA_CONTROL_MAX_COMMANDS_PER_MESSAGE) {
            error_report("rdma: Too many requests in this message (%d). "
                         "Bailing.", head.repeat);
            ret = -EIO;
            break;
        }

        switch (head.type) {
        case RDMA_CONTROL_COMPRESS:
            comp = (RDMACompress *) rdma->wr_data[idx].control_curr;
            network_to_compress(comp);

            trace_qemu_rdma_registration_handle_compress(comp->length,
                                                         comp->block_idx,
                                                         comp->offset);
            if (comp->block_idx >= rdma->local_ram_blocks.nb_blocks) {
                error_report("rdma: 'compress' bad block index %u (vs %d)",
                             (unsigned int)comp->block_idx,
                             rdma->local_ram_blocks.nb_blocks);
                ret = -EIO;
                goto out;
            }
            block = &(rdma->local_ram_blocks.block[comp->block_idx]);

            host_addr = block->local_host_addr +
                            (comp->offset - block->offset);

            ram_handle_compressed(host_addr, comp->value, comp->length);
            break;

        case RDMA_CONTROL_REGISTER_FINISHED:
            trace_qemu_rdma_registration_handle_finished();
            goto out;

        case RDMA_CONTROL_RAM_BLOCKS_REQUEST:
            trace_qemu_rdma_registration_handle_ram_blocks();

            /* Sort our local RAM Block list so it's the same as the source,
             * we can do this since we've filled in a src_index in the list
             * as we received the RAMBlock list earlier.
             */
            qsort(rdma->local_ram_blocks.block,
                  rdma->local_ram_blocks.nb_blocks,
                  sizeof(RDMALocalBlock), dest_ram_sort_func);
            for (i = 0; i < local->nb_blocks; i++) {
                local->block[i].index = i;
            }

            if (rdma->pin_all) {
                ret = qemu_rdma_reg_whole_ram_blocks(rdma);
                if (ret) {
                    error_report("rdma migration: error dest "
                                 "registering ram blocks");
                    goto out;
                }
            }

            /*
             * Dest uses this to prepare to transmit the RAMBlock descriptions
             * to the source VM after connection setup.
             * Both sides use the "remote" structure to communicate and update
             * their "local" descriptions with what was sent.
             */
            for (i = 0; i < local->nb_blocks; i++) {
                rdma->dest_blocks[i].remote_host_addr =
                    (uintptr_t)(local->block[i].local_host_addr);

                if (rdma->pin_all) {
                    rdma->dest_blocks[i].remote_rkey = local->block[i].mr->rkey;
                }

                rdma->dest_blocks[i].offset = local->block[i].offset;
                rdma->dest_blocks[i].length = local->block[i].length;

                dest_block_to_network(&rdma->dest_blocks[i]);
                trace_qemu_rdma_registration_handle_ram_blocks_loop(
                    local->block[i].block_name,
                    local->block[i].offset,
                    local->block[i].length,
                    local->block[i].local_host_addr,
                    local->block[i].src_index);
            }

            blocks.len = rdma->local_ram_blocks.nb_blocks
                                                * sizeof(RDMADestBlock);

            ret = qemu_rdma_post_send_control(rdma,
                                    (uint8_t *) rdma->dest_blocks, &blocks);

            if (ret < 0) {
                error_report("rdma migration: error sending remote info");
                goto out;
            }

            break;
        case RDMA_CONTROL_REGISTER_REQUEST:
            trace_qemu_rdma_registration_handle_register(head.repeat);

            reg_resp.repeat = head.repeat;
            registers = (RDMARegister *) rdma->wr_data[idx].control_curr;

            for (count = 0; count < head.repeat; count++) {
                uint64_t chunk;
                uint8_t *chunk_start, *chunk_end;

                reg = &registers[count];
                network_to_register(reg);

                reg_result = &results[count];

                trace_qemu_rdma_registration_handle_register_loop(count,
                         reg->current_index, reg->key.current_addr, reg->chunks);

                if (reg->current_index >= rdma->local_ram_blocks.nb_blocks) {
                    error_report("rdma: 'register' bad block index %u (vs %d)",
                                 (unsigned int)reg->current_index,
                                 rdma->local_ram_blocks.nb_blocks);
                    ret = -ENOENT;
                    goto out;
                }
                block = &(rdma->local_ram_blocks.block[reg->current_index]);
                if (block->is_ram_block) {
                    if (block->offset > reg->key.current_addr) {
                        error_report("rdma: bad register address for block %s"
                            " offset: %" PRIx64 " current_addr: %" PRIx64,
                            block->block_name, block->offset,
                            reg->key.current_addr);
                        ret = -ERANGE;
                        goto out;
                    }
                    host_addr = (block->local_host_addr +
                                (reg->key.current_addr - block->offset));
                    chunk = ram_chunk_index(block->local_host_addr,
                                            (uint8_t *) host_addr);
                } else {
                    chunk = reg->key.chunk;
                    host_addr = block->local_host_addr +
                        (reg->key.chunk * (1UL << RDMA_REG_CHUNK_SHIFT));
                    /* Check for particularly bad chunk value */
                    if (host_addr < (void *)block->local_host_addr) {
                        error_report("rdma: bad chunk for block %s"
                                     " chunk: %" PRIx64,
                                     block->block_name, reg->key.chunk);
                        ret = -ERANGE;
                        goto out;
                    }
                }
                chunk_start = ram_chunk_start(block, chunk);
                chunk_end = ram_chunk_end(block, chunk + reg->chunks);
                /* avoid "-Waddress-of-packed-member" warning */
                uint32_t tmp_rkey = 0;
                if (qemu_rdma_register_and_get_keys(rdma, block,
                            (uintptr_t)host_addr, NULL, &tmp_rkey,
                            chunk, chunk_start, chunk_end)) {
                    error_report("cannot get rkey");
                    ret = -EINVAL;
                    goto out;
                }
                reg_result->rkey = tmp_rkey;

                reg_result->host_addr = (uintptr_t)block->local_host_addr;

                trace_qemu_rdma_registration_handle_register_rkey(
                                                           reg_result->rkey);

                result_to_network(reg_result);
            }

            ret = qemu_rdma_post_send_control(rdma,
                            (uint8_t *) results, &reg_resp);

            if (ret < 0) {
                error_report("Failed to send control buffer");
                goto out;
            }
            break;
        case RDMA_CONTROL_UNREGISTER_REQUEST:
            trace_qemu_rdma_registration_handle_unregister(head.repeat);
            unreg_resp.repeat = head.repeat;
            registers = (RDMARegister *) rdma->wr_data[idx].control_curr;

            for (count = 0; count < head.repeat; count++) {
                reg = &registers[count];
                network_to_register(reg);

                trace_qemu_rdma_registration_handle_unregister_loop(count,
                           reg->current_index, reg->key.chunk);

                block = &(rdma->local_ram_blocks.block[reg->current_index]);

                ret = ibv_dereg_mr(block->pmr[reg->key.chunk]);
                block->pmr[reg->key.chunk] = NULL;

                if (ret != 0) {
                    perror("rdma unregistration chunk failed");
                    ret = -ret;
                    goto out;
                }

                rdma->total_registrations--;

                trace_qemu_rdma_registration_handle_unregister_success(
                                                       reg->key.chunk);
            }

            ret = qemu_rdma_post_send_control(rdma, NULL, &unreg_resp);

            if (ret < 0) {
                error_report("Failed to send control buffer");
                goto out;
            }
            break;
        case RDMA_CONTROL_REGISTER_RESULT:
            error_report("Invalid RESULT message at dest.");
            ret = -EIO;
            goto out;
        default:
            error_report("Unknown control message %s", control_desc(head.type));
            ret = -EIO;
            goto out;
        }
    } while (1);
out:
    if (ret < 0) {
        rdma->error_state = ret;
    }
    return ret;
}
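/*
 * In protocol terms, the loop above is the destination half of every
 * source-side qemu_rdma_exchange_send(): COMPRESS zeroes a chunk locally,
 * RAM_BLOCKS_REQUEST answers with the RDMADestBlock table,
 * REGISTER_REQUEST pins chunks and replies with their rkeys,
 * UNREGISTER_REQUEST drops them again, and REGISTER_FINISHED ends the
 * iteration.
 */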
/*
 * Called via a ram_control_load_hook during the initial RAM load section which
 * lists the RAMBlocks by name.  This lets us know the order of the RAMBlocks
 * on the source.
 * We've already built our local RAMBlock list, but not yet sent the list to
 * the source.
 */
static int
rdma_block_notification_handle(QEMUFile *f, const char *name)
{
    RDMAContext *rdma;
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f));
    int curr;
    int found = -1;

    RCU_READ_LOCK_GUARD();
    rdma = qatomic_rcu_read(&rioc->rdmain);

    if (!rdma) {
        return -EIO;
    }

    /* Find the matching RAMBlock in our local list */
    for (curr = 0; curr < rdma->local_ram_blocks.nb_blocks; curr++) {
        if (!strcmp(rdma->local_ram_blocks.block[curr].block_name, name)) {
            found = curr;
            break;
        }
    }

    if (found == -1) {
        error_report("RAMBlock '%s' not found on destination", name);
        return -ENOENT;
    }

    rdma->local_ram_blocks.block[curr].src_index = rdma->next_src_index;
    trace_rdma_block_notification_handle(name, rdma->next_src_index);
    rdma->next_src_index++;

    return 0;
}
static int rdma_load_hook(QEMUFile *f, uint64_t flags, void *data)
{
    switch (flags) {
    case RAM_CONTROL_BLOCK_REG:
        return rdma_block_notification_handle(f, data);

    case RAM_CONTROL_HOOK:
        return qemu_rdma_registration_handle(f);

    default:
        /* Shouldn't be called with any other values */
        abort();
    }
}
static int qemu_rdma_registration_start(QEMUFile *f,
                                        uint64_t flags, void *data)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f));
    RDMAContext *rdma;

    if (migration_in_postcopy()) {
        return 0;
    }

    RCU_READ_LOCK_GUARD();
    rdma = qatomic_rcu_read(&rioc->rdmaout);
    if (!rdma) {
        return -EIO;
    }

    CHECK_ERROR_STATE();

    trace_qemu_rdma_registration_start(flags);
    qemu_put_be64(f, RAM_SAVE_FLAG_HOOK);
    qemu_fflush(f);

    return 0;
}
/*
 * Inform dest that dynamic registrations are done for now.
 * First, flush writes, if any.
 */
static int qemu_rdma_registration_stop(QEMUFile *f,
                                       uint64_t flags, void *data)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f));
    RDMAContext *rdma;
    RDMAControlHeader head = { .len = 0, .repeat = 1 };
    int ret = 0;

    if (migration_in_postcopy()) {
        return 0;
    }

    RCU_READ_LOCK_GUARD();
    rdma = qatomic_rcu_read(&rioc->rdmaout);
    if (!rdma) {
        return -EIO;
    }

    CHECK_ERROR_STATE();

    qemu_fflush(f);
    ret = qemu_rdma_drain_cq(rdma);

    if (ret < 0) {
        goto err;
    }

    if (flags == RAM_CONTROL_SETUP) {
        RDMAControlHeader resp = {.type = RDMA_CONTROL_RAM_BLOCKS_RESULT };
        RDMALocalBlocks *local = &rdma->local_ram_blocks;
        int reg_result_idx, i, nb_dest_blocks;

        head.type = RDMA_CONTROL_RAM_BLOCKS_REQUEST;
        trace_qemu_rdma_registration_stop_ram();

        /*
         * Make sure that we parallelize the pinning on both sides.
         * For very large guests, doing this serially takes a really
         * long time, so we have to 'interleave' the pinning locally
         * with the control messages by performing the pinning on this
         * side before we receive the control response from the other
         * side that the pinning has completed.
         */
        ret = qemu_rdma_exchange_send(rdma, &head, NULL, &resp,
                    &reg_result_idx, rdma->pin_all ?
                    qemu_rdma_reg_whole_ram_blocks : NULL);
        if (ret < 0) {
            fprintf(stderr, "receiving remote info!");
            return ret;
        }

        nb_dest_blocks = resp.len / sizeof(RDMADestBlock);

        /*
         * The protocol uses two different sets of rkeys (mutually exclusive):
         * 1. One key to represent the virtual address of the entire ram block.
         *    (dynamic chunk registration disabled - pin everything with one rkey.)
         * 2. One to represent individual chunks within a ram block.
         *    (dynamic chunk registration enabled - pin individual chunks.)
         *
         * Once the capability is successfully negotiated, the destination transmits
         * the keys to use (or sends them later) including the virtual addresses
         * and then propagates the remote ram block descriptions to its local copy.
         */

        if (local->nb_blocks != nb_dest_blocks) {
            fprintf(stderr, "ram blocks mismatch (Number of blocks %d vs %d) "
                    "Your QEMU command line parameters are probably "
                    "not identical on both the source and destination.",
                    local->nb_blocks, nb_dest_blocks);
            rdma->error_state = -EINVAL;
            return -EINVAL;
        }

        qemu_rdma_move_header(rdma, reg_result_idx, &resp);
        memcpy(rdma->dest_blocks,
            rdma->wr_data[reg_result_idx].control_curr, resp.len);
        for (i = 0; i < nb_dest_blocks; i++) {
            network_to_dest_block(&rdma->dest_blocks[i]);

            /* We require that the blocks are in the same order */
            if (rdma->dest_blocks[i].length != local->block[i].length) {
                fprintf(stderr, "Block %s/%d has a different length %" PRIu64
                        " vs %" PRIu64, local->block[i].block_name, i,
                        local->block[i].length,
                        rdma->dest_blocks[i].length);
                rdma->error_state = -EINVAL;
                return -EINVAL;
            }
            local->block[i].remote_host_addr =
                    rdma->dest_blocks[i].remote_host_addr;
            local->block[i].remote_rkey = rdma->dest_blocks[i].remote_rkey;
        }
    }

    trace_qemu_rdma_registration_stop(flags);

    head.type = RDMA_CONTROL_REGISTER_FINISHED;
    ret = qemu_rdma_exchange_send(rdma, &head, NULL, NULL, NULL, NULL);

    if (ret < 0) {
        goto err;
    }

    return 0;
err:
    rdma->error_state = ret;
    return ret;
}
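/*
 * During RAM_CONTROL_SETUP this function performs the whole block-table
 * handshake: it requests the table, optionally pins all memory in
 * parallel on both sides, verifies that both QEMUs agree on the block
 * list, and adopts the destination's remote_host_addr/remote_rkey for
 * each block before signalling REGISTER_FINISHED.
 */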
static const QEMUFileHooks rdma_read_hooks = {
    .hook_ram_load = rdma_load_hook,
};

static const QEMUFileHooks rdma_write_hooks = {
    .before_ram_iterate = qemu_rdma_registration_start,
    .after_ram_iterate  = qemu_rdma_registration_stop,
    .save_page          = qemu_rdma_save_page,
};
static void qio_channel_rdma_finalize(Object *obj)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(obj);
    if (rioc->rdmain) {
        qemu_rdma_cleanup(rioc->rdmain);
        g_free(rioc->rdmain);
        rioc->rdmain = NULL;
    }
    if (rioc->rdmaout) {
        qemu_rdma_cleanup(rioc->rdmaout);
        g_free(rioc->rdmaout);
        rioc->rdmaout = NULL;
    }
}

static void qio_channel_rdma_class_init(ObjectClass *klass,
                                        void *class_data G_GNUC_UNUSED)
{
    QIOChannelClass *ioc_klass = QIO_CHANNEL_CLASS(klass);

    ioc_klass->io_writev = qio_channel_rdma_writev;
    ioc_klass->io_readv = qio_channel_rdma_readv;
    ioc_klass->io_set_blocking = qio_channel_rdma_set_blocking;
    ioc_klass->io_close = qio_channel_rdma_close;
    ioc_klass->io_create_watch = qio_channel_rdma_create_watch;
    ioc_klass->io_set_aio_fd_handler = qio_channel_rdma_set_aio_fd_handler;
    ioc_klass->io_shutdown = qio_channel_rdma_shutdown;
}

static const TypeInfo qio_channel_rdma_info = {
    .parent = TYPE_QIO_CHANNEL,
    .name = TYPE_QIO_CHANNEL_RDMA,
    .instance_size = sizeof(QIOChannelRDMA),
    .instance_finalize = qio_channel_rdma_finalize,
    .class_init = qio_channel_rdma_class_init,
};

static void qio_channel_rdma_register_types(void)
{
    type_register_static(&qio_channel_rdma_info);
}

type_init(qio_channel_rdma_register_types);
static QEMUFile *rdma_new_input(RDMAContext *rdma)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(object_new(TYPE_QIO_CHANNEL_RDMA));

    rioc->file = qemu_file_new_input(QIO_CHANNEL(rioc));
    rioc->rdmain = rdma;
    rioc->rdmaout = rdma->return_path;
    qemu_file_set_hooks(rioc->file, &rdma_read_hooks);

    return rioc->file;
}

static QEMUFile *rdma_new_output(RDMAContext *rdma)
{
    QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(object_new(TYPE_QIO_CHANNEL_RDMA));

    rioc->file = qemu_file_new_output(QIO_CHANNEL(rioc));
    rioc->rdmaout = rdma;
    rioc->rdmain = rdma->return_path;
    qemu_file_set_hooks(rioc->file, &rdma_write_hooks);

    return rioc->file;
}
static void rdma_accept_incoming_migration(void *opaque)
{
    RDMAContext *rdma = opaque;
    int ret;
    QEMUFile *f;
    Error *local_err = NULL;

    trace_qemu_rdma_accept_incoming_migration();
    ret = qemu_rdma_accept(rdma);

    if (ret) {
        fprintf(stderr, "RDMA ERROR: Migration initialization failed\n");
        return;
    }

    trace_qemu_rdma_accept_incoming_migration_accepted();

    if (rdma->is_return_path) {
        return;
    }

    f = rdma_new_input(rdma);
    if (f == NULL) {
        fprintf(stderr, "RDMA ERROR: could not open RDMA for input\n");
        qemu_rdma_cleanup(rdma);
        return;
    }

    rdma->migration_started_on_destination = 1;
    migration_fd_process_incoming(f, &local_err);
    if (local_err) {
        error_reportf_err(local_err, "RDMA ERROR:");
    }
}
void rdma_start_incoming_migration(const char *host_port, Error **errp)
{
    int ret;
    RDMAContext *rdma;

    trace_rdma_start_incoming_migration();

    /* Avoid ram_block_discard_disable(), cannot change during migration. */
    if (ram_block_discard_is_required()) {
        error_setg(errp, "RDMA: cannot disable RAM discard");
        return;
    }

    rdma = qemu_rdma_data_init(host_port, errp);
    if (rdma == NULL) {
        goto err;
    }

    ret = qemu_rdma_dest_init(rdma, errp);
    if (ret) {
        goto err;
    }

    trace_rdma_start_incoming_migration_after_dest_init();

    ret = rdma_listen(rdma->listen_id, 5);

    if (ret) {
        ERROR(errp, "listening on socket!");
        goto cleanup_rdma;
    }

    trace_rdma_start_incoming_migration_after_rdma_listen();

    qemu_set_fd_handler(rdma->channel->fd, rdma_accept_incoming_migration,
                        NULL, (void *)(intptr_t)rdma);
    return;

cleanup_rdma:
    qemu_rdma_cleanup(rdma);
err:
    if (rdma) {
        g_free(rdma->host);
        g_free(rdma->host_port);
    }
    g_free(rdma);
}
void rdma_start_outgoing_migration(void *opaque,
                            const char *host_port, Error **errp)
{
    MigrationState *s = opaque;
    RDMAContext *rdma_return_path = NULL;
    RDMAContext *rdma;
    int ret = 0;

    /* Avoid ram_block_discard_disable(), cannot change during migration. */
    if (ram_block_discard_is_required()) {
        error_setg(errp, "RDMA: cannot disable RAM discard");
        return;
    }

    rdma = qemu_rdma_data_init(host_port, errp);
    if (rdma == NULL) {
        goto err;
    }

    ret = qemu_rdma_source_init(rdma, migrate_rdma_pin_all(), errp);

    if (ret) {
        goto err;
    }

    trace_rdma_start_outgoing_migration_after_rdma_source_init();
    ret = qemu_rdma_connect(rdma, false, errp);

    if (ret) {
        goto err;
    }

    /* RDMA postcopy needs a separate queue pair for the return path */
    if (migrate_postcopy() || migrate_return_path()) {
        rdma_return_path = qemu_rdma_data_init(host_port, errp);

        if (rdma_return_path == NULL) {
            goto return_path_err;
        }

        ret = qemu_rdma_source_init(rdma_return_path,
                                    migrate_rdma_pin_all(), errp);

        if (ret) {
            goto return_path_err;
        }

        ret = qemu_rdma_connect(rdma_return_path, true, errp);

        if (ret) {
            goto return_path_err;
        }

        rdma->return_path = rdma_return_path;
        rdma_return_path->return_path = rdma;
        rdma_return_path->is_return_path = true;
    }

    trace_rdma_start_outgoing_migration_after_rdma_connect();

    s->to_dst_file = rdma_new_output(rdma);
    migrate_fd_connect(s, NULL);
    return;
return_path_err:
    qemu_rdma_cleanup(rdma);
err:
    g_free(rdma);
    g_free(rdma_return_path);
}