/*
 * QEMU paravirtual RDMA - Generic RDMA backend
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/qmp/qlist.h"
#include "qapi/qmp/qnum.h"
#include "qapi/qapi-events-rdma.h"

#include <infiniband/verbs.h>
#include <infiniband/umad_types.h>
#include <infiniband/umad.h>
#include <rdma/rdma_user_cm.h>

#include "contrib/rdmacm-mux/rdmacm-mux.h"
#include "trace.h"
#include "rdma_utils.h"
#include "rdma_rm.h"
#include "rdma_backend.h"

#define THR_NAME_LEN 16
#define THR_POLL_TO  5000

#define MAD_HDR_SIZE sizeof(struct ibv_grh)

typedef struct BackendCtx {
    void *up_ctx;
    struct ibv_sge sge; /* Used to save MAD recv buffer */
    RdmaBackendQP *backend_qp; /* To maintain recv buffers */
    RdmaBackendSRQ *backend_srq;
} BackendCtx;

struct backend_umad {
    struct ib_user_mad hdr;
    char mad[RDMA_MAX_PRIVATE_DATA];
};

static void (*comp_handler)(void *ctx, struct ibv_wc *wc);

static void dummy_comp_handler(void *ctx, struct ibv_wc *wc)
{
    rdma_error_report("No completion handler is registered");
}

static inline void complete_work(enum ibv_wc_status status, uint32_t vendor_err,
                                 void *ctx)
{
    struct ibv_wc wc = {};

    wc.status = status;
    wc.vendor_err = vendor_err;

    comp_handler(ctx, &wc);
}

static void free_cqe_ctx(gpointer data, gpointer user_data)
{
    BackendCtx *bctx;
    RdmaDeviceResources *rdma_dev_res = user_data;
    unsigned long cqe_ctx_id = GPOINTER_TO_INT(data);

    bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, cqe_ctx_id);
    if (bctx) {
        rdma_rm_dealloc_cqe_ctx(rdma_dev_res, cqe_ctx_id);
        atomic_dec(&rdma_dev_res->stats.missing_cqe);
    }
    g_free(bctx);
}

static void clean_recv_mads(RdmaBackendDev *backend_dev)
{
    unsigned long cqe_ctx_id;

    do {
        cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->
                                                    recv_mads_list);
        if (cqe_ctx_id != -ENOENT) {
            atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
            free_cqe_ctx(GINT_TO_POINTER(cqe_ctx_id),
                         backend_dev->rdma_dev_res);
        }
    } while (cqe_ctx_id != -ENOENT);
}

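/*
 * Drain all available completions from a backend CQ, report each one to the
 * registered completion handler and release the CQE context that was
 * allocated when the corresponding work request was posted.
 */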
static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
{
    int i, ne, total_ne = 0;
    BackendCtx *bctx;
    struct ibv_wc wc[2];
    RdmaProtectedGSList *cqe_ctx_list;

    qemu_mutex_lock(&rdma_dev_res->lock);
    do {
        ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);

        trace_rdma_poll_cq(ne, ibcq);

        for (i = 0; i < ne; i++) {
            bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
            if (unlikely(!bctx)) {
                rdma_error_report("No matching ctx for req %"PRId64,
                                  wc[i].wr_id);
                continue;
            }

            comp_handler(bctx->up_ctx, &wc[i]);

            if (bctx->backend_qp) {
                cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
            } else {
                cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
            }

            rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
            rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
            g_free(bctx);
        }
        total_ne += ne;
    } while (ne > 0);
    atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
    qemu_mutex_unlock(&rdma_dev_res->lock);

    if (ne < 0) {
        rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
    }

    rdma_dev_res->stats.completions += total_ne;

    return total_ne;
}

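/*
 * Completion-channel thread: wait for completion events from the host HCA,
 * re-arm notification on the signalled CQ and poll it.
 */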
static void *comp_handler_thread(void *arg)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)arg;
    int rc;
    struct ibv_cq *ev_cq;
    void *ev_ctx;
    int flags;
    GPollFD pfds[1];

    /* Change to non-blocking mode */
    flags = fcntl(backend_dev->channel->fd, F_GETFL);
    rc = fcntl(backend_dev->channel->fd, F_SETFL, flags | O_NONBLOCK);
    if (rc < 0) {
        rdma_error_report("Failed to change backend channel FD to non-blocking");
        return NULL;
    }

    pfds[0].fd = backend_dev->channel->fd;
    pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;

    backend_dev->comp_thread.is_running = true;

    while (backend_dev->comp_thread.run) {
        do {
            rc = qemu_poll_ns(pfds, 1, THR_POLL_TO * (int64_t)SCALE_MS);
            if (!rc) {
                backend_dev->rdma_dev_res->stats.poll_cq_ppoll_to++;
            }
        } while (!rc && backend_dev->comp_thread.run);

        if (backend_dev->comp_thread.run) {
            rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx);
            if (unlikely(rc)) {
                rdma_error_report("ibv_get_cq_event fail, rc=%d, errno=%d", rc,
                                  errno);
                continue;
            }

            rc = ibv_req_notify_cq(ev_cq, 0);
            if (unlikely(rc)) {
                rdma_error_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc,
                                  errno);
            }

            backend_dev->rdma_dev_res->stats.poll_cq_from_bk++;
            rdma_poll_cq(backend_dev->rdma_dev_res, ev_cq);

            ibv_ack_cq_events(ev_cq, 1);
        }
    }

    backend_dev->comp_thread.is_running = false;

    return NULL;
}

static inline void disable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    atomic_set(&backend_dev->rdmacm_mux.can_receive, 0);
}

static inline void enable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    atomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg));
}

static inline int rdmacm_mux_can_process_async(RdmaBackendDev *backend_dev)
{
    return atomic_read(&backend_dev->rdmacm_mux.can_receive);
}

static int rdmacm_mux_check_op_status(CharBackend *mad_chr_be)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    ret = qemu_chr_fe_read_all(mad_chr_be, (uint8_t *)&msg, sizeof(msg));
    if (ret != sizeof(msg)) {
        rdma_error_report("Got invalid message from mux: size %d, expecting %d",
                          ret, (int)sizeof(msg));
        return -EIO;
    }

    trace_rdmacm_mux_check_op_status(msg.hdr.msg_type, msg.hdr.op_code,
                                     msg.hdr.err_code);

    if (msg.hdr.msg_type != RDMACM_MUX_MSG_TYPE_RESP) {
        rdma_error_report("Got invalid message type %d", msg.hdr.msg_type);
        return -EIO;
    }

    if (msg.hdr.err_code != RDMACM_MUX_ERR_CODE_OK) {
        rdma_error_report("Operation failed in mux, error code %d",
                          msg.hdr.err_code);
        return -EIO;
    }

    return 0;
}

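/*
 * Send a request to the external rdmacm-mux process over the chardev and
 * wait synchronously for its status reply, suppressing the async read
 * handler for the duration of the exchange.
 */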
static int rdmacm_mux_send(RdmaBackendDev *backend_dev, RdmaCmMuxMsg *msg)
{
    int rc = 0;

    msg->hdr.msg_type = RDMACM_MUX_MSG_TYPE_REQ;
    trace_rdmacm_mux("send", msg->hdr.msg_type, msg->hdr.op_code);
    disable_rdmacm_mux_async(backend_dev);
    rc = qemu_chr_fe_write(backend_dev->rdmacm_mux.chr_be,
                           (const uint8_t *)msg, sizeof(*msg));
    if (rc != sizeof(*msg)) {
        enable_rdmacm_mux_async(backend_dev);
        rdma_error_report("Failed to send request to rdmacm_mux (rc=%d)", rc);
        return -EIO;
    }

    rc = rdmacm_mux_check_op_status(backend_dev->rdmacm_mux.chr_be);
    if (rc) {
        rdma_error_report("Failed to execute rdmacm_mux request %d (rc=%d)",
                          msg->hdr.op_code, rc);
    }

    enable_rdmacm_mux_async(backend_dev);

    return 0;
}

static void stop_backend_thread(RdmaBackendThread *thread)
{
    thread->run = false;
    while (thread->is_running) {
        sleep(THR_POLL_TO / SCALE_US / 2);
    }
}

static void start_comp_thread(RdmaBackendDev *backend_dev)
{
    char thread_name[THR_NAME_LEN] = {};

    stop_backend_thread(&backend_dev->comp_thread);

    snprintf(thread_name, sizeof(thread_name), "rdma_comp_%s",
             ibv_get_device_name(backend_dev->ib_dev));
    backend_dev->comp_thread.run = true;
    qemu_thread_create(&backend_dev->comp_thread.thread, thread_name,
                       comp_handler_thread, backend_dev, QEMU_THREAD_DETACHED);
}

void rdma_backend_register_comp_handler(void (*handler)(void *ctx,
                                                         struct ibv_wc *wc))
{
    comp_handler = handler;
}

void rdma_backend_unregister_comp_handler(void)
{
    rdma_backend_register_comp_handler(dummy_comp_handler);
}

int rdma_backend_query_port(RdmaBackendDev *backend_dev,
                            struct ibv_port_attr *port_attr)
{
    int rc;

    rc = ibv_query_port(backend_dev->context, backend_dev->port_num, port_attr);
    if (rc) {
        rdma_error_report("ibv_query_port fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq)
{
    int polled;

    rdma_dev_res->stats.poll_cq_from_guest++;
    polled = rdma_poll_cq(rdma_dev_res, cq->ibcq);
    if (!polled) {
        rdma_dev_res->stats.poll_cq_from_guest_empty++;
    }
}

static GHashTable *ah_hash;

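/*
 * Look up (or create and cache) an address handle for the given destination
 * GID; cached AHs are keyed by the destination GID in ah_hash.
 */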
static struct ibv_ah *create_ah(RdmaBackendDev *backend_dev, struct ibv_pd *pd,
                                uint8_t sgid_idx, union ibv_gid *dgid)
{
    GBytes *ah_key = g_bytes_new(dgid, sizeof(*dgid));
    struct ibv_ah *ah = g_hash_table_lookup(ah_hash, ah_key);

    if (ah) {
        trace_rdma_create_ah_cache_hit(be64_to_cpu(dgid->global.subnet_prefix),
                                       be64_to_cpu(dgid->global.interface_id));
        g_bytes_unref(ah_key);
    } else {
        struct ibv_ah_attr ah_attr = {
            .is_global     = 1,
            .port_num      = backend_dev->port_num,
            .grh.hop_limit = 1,
        };

        ah_attr.grh.dgid = *dgid;
        ah_attr.grh.sgid_index = sgid_idx;

        ah = ibv_create_ah(pd, &ah_attr);
        if (ah) {
            g_hash_table_insert(ah_hash, ah_key, ah);
        } else {
            g_bytes_unref(ah_key);
            rdma_error_report("Failed to create AH for gid <0x%" PRIx64", 0x%"PRIx64">",
                              be64_to_cpu(dgid->global.subnet_prefix),
                              be64_to_cpu(dgid->global.interface_id));
        }

        trace_rdma_create_ah_cache_miss(be64_to_cpu(dgid->global.subnet_prefix),
                                        be64_to_cpu(dgid->global.interface_id));
    }

    return ah;
}

static void destroy_ah_hash_key(gpointer data)
{
    g_bytes_unref(data);
}

static void destroy_ah_hast_data(gpointer data)
{
    struct ibv_ah *ah = data;

    ibv_destroy_ah(ah);
}

static void ah_cache_init(void)
{
    ah_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal,
                                    destroy_ah_hash_key, destroy_ah_hast_data);
}

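/*
 * Translate a guest SGE list into a host SGE list: each guest lkey is
 * resolved to its registered MR so the address can be rebased into the host
 * mapping and the backend lkey substituted.
 */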
static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res,
                                struct ibv_sge *dsge, struct ibv_sge *ssge,
                                uint8_t num_sge, uint64_t *total_length)
{
    RdmaRmMR *mr;
    int ssge_idx;

    for (ssge_idx = 0; ssge_idx < num_sge; ssge_idx++) {
        mr = rdma_rm_get_mr(rdma_dev_res, ssge[ssge_idx].lkey);
        if (unlikely(!mr)) {
            rdma_error_report("Invalid lkey 0x%x", ssge[ssge_idx].lkey);
            return VENDOR_ERR_INVLKEY | ssge[ssge_idx].lkey;
        }

        dsge->addr = (uintptr_t)mr->virt + ssge[ssge_idx].addr - mr->start;
        dsge->length = ssge[ssge_idx].length;
        dsge->lkey = rdma_backend_mr_lkey(&mr->backend_mr);

        *total_length += dsge->length;

        dsge++;
    }

    return 0;
}

static void trace_mad_message(const char *title, char *buf, int len)
{
    int i;
    char *b = g_malloc0(len * 3 + 1);
    char b1[4];

    for (i = 0; i < len; i++) {
        sprintf(b1, "%.2X ", buf[i] & 0x000000FF);
        strcat(b, b1);
    }

    trace_rdma_mad_message(title, len, b);

    g_free(b);
}

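/*
 * Forward a guest MAD send request to the external rdmacm-mux: copy the MAD
 * payload out of the guest buffers into a RdmaCmMuxMsg and hand it to the
 * multiplexer over the chardev.
 */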
static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx,
                    union ibv_gid *sgid, struct ibv_sge *sge, uint32_t num_sge)
{
    RdmaCmMuxMsg msg = {};
    char *hdr, *data;
    int ret;

    if (num_sge != 2) {
        return -EINVAL;
    }

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_MAD;
    memcpy(msg.hdr.sgid.raw, sgid->raw, sizeof(msg.hdr.sgid));

    msg.umad_len = sge[0].length + sge[1].length;

    if (msg.umad_len > sizeof(msg.umad.mad)) {
        return -ENOMEM;
    }

    msg.umad.hdr.addr.qpn = htobe32(1);
    msg.umad.hdr.addr.grh_present = 1;
    msg.umad.hdr.addr.gid_index = sgid_idx;
    memcpy(msg.umad.hdr.addr.gid, sgid->raw, sizeof(msg.umad.hdr.addr.gid));
    msg.umad.hdr.addr.hop_limit = 0xFF;

    hdr = rdma_pci_dma_map(backend_dev->dev, sge[0].addr, sge[0].length);
    if (!hdr) {
        return -ENOMEM;
    }
    data = rdma_pci_dma_map(backend_dev->dev, sge[1].addr, sge[1].length);
    if (!data) {
        rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);
        return -ENOMEM;
    }

    memcpy(&msg.umad.mad[0], hdr, sge[0].length);
    memcpy(&msg.umad.mad[sge[0].length], data, sge[1].length);

    rdma_pci_dma_unmap(backend_dev->dev, data, sge[1].length);
    rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);

    trace_mad_message("send", msg.umad.mad, msg.umad_len);

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to send MAD to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    return 0;
}

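/*
 * Post a send WR on the backend QP. QP0/QP1 are not backed by a real ibv QP:
 * QP0 is rejected and QP1 (GSI) traffic is redirected to the MAD
 * multiplexer. For everything else the guest SGE list is translated and the
 * WR handed to ibv_post_send(), with a CQE context saved for completion.
 */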
void rdma_backend_post_send(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge,
                            uint8_t sgid_idx, union ibv_gid *sgid,
                            union ibv_gid *dgid, uint32_t dqpn, uint32_t dqkey,
                            void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_send_wr wr = {}, *bad_wr;

    if (!qp->ibqp) { /* This field is not initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        } else if (qp_type == IBV_QPT_GSI) {
            rc = mad_send(backend_dev, sgid_idx, sgid, sge, num_sge);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_MAD_SEND, ctx);
                backend_dev->rdma_dev_res->stats.mad_tx_err++;
            } else {
                complete_work(IBV_WC_SUCCESS, 0, ctx);
                backend_dev->rdma_dev_res->stats.mad_tx++;
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_qp = qp;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.tx_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    if (qp_type == IBV_QPT_UD) {
        wr.wr.ud.ah = create_ah(backend_dev, qp->ibpd, sgid_idx, dgid);
        if (!wr.wr.ud.ah) {
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
            goto err_dealloc_cqe_ctx;
        }
        wr.wr.ud.remote_qpn = dqpn;
        wr.wr.ud.remote_qkey = dqkey;
    }

    wr.num_sge = num_sge;
    wr.opcode = IBV_WR_SEND;
    wr.send_flags = IBV_SEND_SIGNALED;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;

    rc = ibv_post_send(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_send fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.tx++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.tx_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

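/*
 * GSI receive path: a recv WR cannot be posted to a real QP here, so remember
 * the guest buffer in a CQE context and queue it on recv_mads_list until a
 * MAD arrives from the multiplexer.
 */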
static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
                                         struct ibv_sge *sge, uint32_t num_sge,
                                         void *ctx)
{
    BackendCtx *bctx;
    int rc;
    uint32_t bctx_id;

    if (num_sge != 1) {
        rdma_error_report("Invalid num_sge (%d), expecting 1", num_sge);
        return VENDOR_ERR_INV_NUM_SGE;
    }

    if (sge[0].length < RDMA_MAX_PRIVATE_DATA + sizeof(struct ibv_grh)) {
        rdma_error_report("Too small buffer for MAD");
        return VENDOR_ERR_INV_MAD_BUFF;
    }

    bctx = g_malloc0(sizeof(*bctx));

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        g_free(bctx);
        return VENDOR_ERR_NOMEM;
    }

    bctx->up_ctx = ctx;
    bctx->sge = *sge;

    rdma_protected_qlist_append_int64(&backend_dev->recv_mads_list, bctx_id);

    return 0;
}

void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge, void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_recv_wr wr = {}, *bad_wr;

    if (!qp->ibqp) { /* This field does not get initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        }
        if (qp_type == IBV_QPT_GSI) {
            rc = save_mad_recv_buffer(backend_dev, sge, num_sge, ctx);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
                backend_dev->rdma_dev_res->stats.mad_rx_bufs_err++;
            } else {
                backend_dev->rdma_dev_res->stats.mad_rx_bufs++;
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_qp = qp;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.rx_bufs_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    wr.num_sge = num_sge;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;
    rc = ibv_post_recv(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_recv fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.rx_bufs++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.rx_bufs_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
                                RdmaBackendSRQ *srq, struct ibv_sge *sge,
                                uint32_t num_sge, void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_recv_wr wr = {}, *bad_wr;

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_srq = srq;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&srq->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.rx_bufs_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    wr.num_sge = num_sge;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;
    rc = ibv_post_srq_recv(srq->ibsrq, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_srq_recv fail, srqn=0x%x, rc=%d, errno=%d",
                          srq->ibsrq->handle, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.rx_bufs++;
    backend_dev->rdma_dev_res->stats.rx_srq++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.rx_bufs_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

int rdma_backend_create_pd(RdmaBackendDev *backend_dev, RdmaBackendPD *pd)
{
    pd->ibpd = ibv_alloc_pd(backend_dev->context);

    if (!pd->ibpd) {
        rdma_error_report("ibv_alloc_pd fail, errno=%d", errno);
        return -EIO;
    }

    return 0;
}

void rdma_backend_destroy_pd(RdmaBackendPD *pd)
{
    if (pd->ibpd) {
        ibv_dealloc_pd(pd->ibpd);
    }
}

int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr,
                           size_t length, int access)
{
    mr->ibmr = ibv_reg_mr(pd->ibpd, addr, length, access);
    if (!mr->ibmr) {
        rdma_error_report("ibv_reg_mr fail, errno=%d", errno);
        return -EIO;
    }

    mr->ibpd = pd->ibpd;

    return 0;
}

void rdma_backend_destroy_mr(RdmaBackendMR *mr)
{
    if (mr->ibmr) {
        ibv_dereg_mr(mr->ibmr);
    }
}

int rdma_backend_create_cq(RdmaBackendDev *backend_dev, RdmaBackendCQ *cq,
                           int cqe)
{
    int rc;

    cq->ibcq = ibv_create_cq(backend_dev->context, cqe + 1, NULL,
                             backend_dev->channel, 0);
    if (!cq->ibcq) {
        rdma_error_report("ibv_create_cq fail, errno=%d", errno);
        return -EIO;
    }

    rc = ibv_req_notify_cq(cq->ibcq, 0);
    if (rc) {
        rdma_warn_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc, errno);
    }

    cq->backend_dev = backend_dev;

    return 0;
}

void rdma_backend_destroy_cq(RdmaBackendCQ *cq)
{
    if (cq->ibcq) {
        ibv_destroy_cq(cq->ibcq);
    }
}

int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
                           RdmaBackendPD *pd, RdmaBackendCQ *scq,
                           RdmaBackendCQ *rcq, RdmaBackendSRQ *srq,
                           uint32_t max_send_wr, uint32_t max_recv_wr,
                           uint32_t max_send_sge, uint32_t max_recv_sge)
{
    struct ibv_qp_init_attr attr = {};

    qp->ibqp = 0;

    switch (qp_type) {
    case IBV_QPT_GSI:
        return 0;

    case IBV_QPT_RC:
        /* fall through */
    case IBV_QPT_UD:
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    attr.qp_type = qp_type;
    attr.send_cq = scq->ibcq;
    attr.recv_cq = rcq->ibcq;
    attr.cap.max_send_wr = max_send_wr;
    attr.cap.max_recv_wr = max_recv_wr;
    attr.cap.max_send_sge = max_send_sge;
    attr.cap.max_recv_sge = max_recv_sge;
    if (srq) {
        attr.srq = srq->ibsrq;
    }

    qp->ibqp = ibv_create_qp(pd->ibpd, &attr);
    if (!qp->ibqp) {
        rdma_error_report("ibv_create_qp fail, errno=%d", errno);
        return -EIO;
    }

    rdma_protected_gslist_init(&qp->cqe_ctx_list);

    qp->ibpd = pd->ibpd;

    /* TODO: Query QP to get max_inline_data and save it to be used in send */

    return 0;
}

int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                               uint8_t qp_type, uint32_t qkey)
{
    struct ibv_qp_attr attr = {};
    int rc, attr_mask;

    attr_mask = IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT;
    attr.qp_state = IBV_QPS_INIT;
    attr.pkey_index = 0;
    attr.port_num = backend_dev->port_num;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr_mask |= IBV_QP_ACCESS_FLAGS;
        trace_rdma_backend_rc_qp_state_init(qp->ibqp->qp_num);
        break;

    case IBV_QPT_UD:
        attr.qkey = qkey;
        attr_mask |= IBV_QP_QKEY;
        trace_rdma_backend_ud_qp_state_init(qp->ibqp->qp_num, qkey);
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

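/*
 * Move the backend QP to RTR. For RC QPs this also programs the path (GRH
 * with the destination GID and the source GID index resolved for this
 * connection).
 */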
int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                              uint8_t qp_type, uint8_t sgid_idx,
                              union ibv_gid *dgid, uint32_t dqpn,
                              uint32_t rq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {};
    union ibv_gid ibv_gid = {
        .global.interface_id = dgid->global.interface_id,
        .global.subnet_prefix = dgid->global.subnet_prefix
    };
    int rc, attr_mask;

    attr.qp_state = IBV_QPS_RTR;
    attr_mask = IBV_QP_STATE;

    qp->sgid_idx = sgid_idx;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr.path_mtu               = IBV_MTU_1024;
        attr.dest_qp_num            = dqpn;
        attr.max_dest_rd_atomic     = 1;
        attr.min_rnr_timer          = 12;
        attr.ah_attr.port_num       = backend_dev->port_num;
        attr.ah_attr.is_global      = 1;
        attr.ah_attr.grh.hop_limit  = 1;
        attr.ah_attr.grh.dgid       = ibv_gid;
        attr.ah_attr.grh.sgid_index = qp->sgid_idx;
        attr.rq_psn                 = rq_psn;

        attr_mask |= IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN |
                     IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC |
                     IBV_QP_MIN_RNR_TIMER;

        trace_rdma_backend_rc_qp_state_rtr(qp->ibqp->qp_num,
                                           be64_to_cpu(ibv_gid.global.
                                                       subnet_prefix),
                                           be64_to_cpu(ibv_gid.global.
                                                       interface_id),
                                           qp->sgid_idx, dqpn, rq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rtr(qp->ibqp->qp_num, use_qkey ? qkey :
                                           0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type,
                              uint32_t sq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {};
    int rc, attr_mask;

    attr.qp_state = IBV_QPS_RTS;
    attr.sq_psn = sq_psn;
    attr_mask = IBV_QP_STATE | IBV_QP_SQ_PSN;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr.timeout       = 14;
        attr.retry_cnt     = 7;
        attr.rnr_retry     = 7;
        attr.max_rd_atomic = 1;

        attr_mask |= IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY |
                     IBV_QP_MAX_QP_RD_ATOMIC;
        trace_rdma_backend_rc_qp_state_rts(qp->ibqp->qp_num, sq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rts(qp->ibqp->qp_num, sq_psn,
                                           use_qkey ? qkey : 0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr,
                          int attr_mask, struct ibv_qp_init_attr *init_attr)
{
    if (!qp->ibqp) {
        attr->qp_state = IBV_QPS_RTS;
        return 0;
    }

    return ibv_query_qp(qp->ibqp, attr, attr_mask, init_attr);
}

void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res)
{
    if (qp->ibqp) {
        ibv_destroy_qp(qp->ibqp);
    }
    g_slist_foreach(qp->cqe_ctx_list.list, free_cqe_ctx, dev_res);
    rdma_protected_gslist_destroy(&qp->cqe_ctx_list);
}

int rdma_backend_create_srq(RdmaBackendSRQ *srq, RdmaBackendPD *pd,
                            uint32_t max_wr, uint32_t max_sge,
                            uint32_t srq_limit)
{
    struct ibv_srq_init_attr srq_init_attr = {};

    srq_init_attr.attr.max_wr = max_wr;
    srq_init_attr.attr.max_sge = max_sge;
    srq_init_attr.attr.srq_limit = srq_limit;

    srq->ibsrq = ibv_create_srq(pd->ibpd, &srq_init_attr);
    if (!srq->ibsrq) {
        rdma_error_report("ibv_create_srq failed, errno=%d", errno);
        return -EIO;
    }

    rdma_protected_gslist_init(&srq->cqe_ctx_list);

    return 0;
}

int rdma_backend_query_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr)
{
    if (!srq->ibsrq) {
        return -EINVAL;
    }

    return ibv_query_srq(srq->ibsrq, srq_attr);
}

int rdma_backend_modify_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr,
                            int srq_attr_mask)
{
    if (!srq->ibsrq) {
        return -EINVAL;
    }

    return ibv_modify_srq(srq->ibsrq, srq_attr, srq_attr_mask);
}

void rdma_backend_destroy_srq(RdmaBackendSRQ *srq, RdmaDeviceResources *dev_res)
{
    if (srq->ibsrq) {
        ibv_destroy_srq(srq->ibsrq);
    }
    g_slist_foreach(srq->cqe_ctx_list.list, free_cqe_ctx, dev_res);
    rdma_protected_gslist_destroy(&srq->cqe_ctx_list);
}

#define CHK_ATTR(req, dev, member, fmt) ({ \
    trace_rdma_check_dev_attr(#member, dev.member, req->member); \
    if (req->member > dev.member) { \
        rdma_warn_report("%s = "fmt" is higher than host device capability "fmt, \
                         #member, req->member, dev.member); \
        req->member = dev.member; \
    } \
})

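/*
 * Query the host device capabilities and clamp every guest-visible attribute
 * to what the backend HCA actually supports.
 */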
static int init_device_caps(RdmaBackendDev *backend_dev,
                            struct ibv_device_attr *dev_attr)
{
    struct ibv_device_attr bk_dev_attr;
    int rc;

    rc = ibv_query_device(backend_dev->context, &bk_dev_attr);
    if (rc) {
        rdma_error_report("ibv_query_device fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    dev_attr->max_sge = MAX_SGE;
    dev_attr->max_srq_sge = MAX_SGE;

    CHK_ATTR(dev_attr, bk_dev_attr, max_mr_size, "%" PRId64);
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_sge, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_cq, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_mr, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_pd, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_init_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_ah, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_srq, "%d");

    return 0;
}

static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid,
                                 union ibv_gid *my_gid, int paylen)
{
    grh->paylen = htons(paylen);
    grh->sgid = *sgid;
    grh->dgid = *my_gid;
}

static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
                                     RdmaCmMuxMsg *msg)
{
    unsigned long cqe_ctx_id;
    BackendCtx *bctx;
    char *mad;

    trace_mad_message("recv", msg->umad.mad, msg->umad_len);

    cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->recv_mads_list);
    if (cqe_ctx_id == -ENOENT) {
        rdma_warn_report("No more free MADs buffers, waiting for a while");
        sleep(THR_POLL_TO);
        return;
    }

    bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
    if (unlikely(!bctx)) {
        rdma_error_report("No matching ctx for req %ld", cqe_ctx_id);
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        return;
    }

    mad = rdma_pci_dma_map(backend_dev->dev, bctx->sge.addr,
                           bctx->sge.length);
    if (!mad || bctx->sge.length < msg->umad_len + MAD_HDR_SIZE) {
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_INV_MAD_BUFF,
                      bctx->up_ctx);
    } else {
        struct ibv_wc wc = {};
        memset(mad, 0, bctx->sge.length);
        build_mad_hdr((struct ibv_grh *)mad,
                      (union ibv_gid *)&msg->umad.hdr.addr.gid, &msg->hdr.sgid,
                      msg->umad_len);
        memcpy(&mad[MAD_HDR_SIZE], msg->umad.mad, msg->umad_len);
        rdma_pci_dma_unmap(backend_dev->dev, mad, bctx->sge.length);

        wc.byte_len = msg->umad_len;
        wc.status = IBV_WC_SUCCESS;
        wc.wc_flags = IBV_WC_GRH;
        backend_dev->rdma_dev_res->stats.mad_rx++;
        comp_handler(bctx->up_ctx, &wc);
    }

    g_free(bctx);
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
}

static inline int rdmacm_mux_can_receive(void *opaque)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;

    return rdmacm_mux_can_process_async(backend_dev);
}

static void rdmacm_mux_read(void *opaque, const uint8_t *buf, int size)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;
    RdmaCmMuxMsg *msg = (RdmaCmMuxMsg *)buf;

    trace_rdmacm_mux("read", msg->hdr.msg_type, msg->hdr.op_code);

    if (msg->hdr.msg_type != RDMACM_MUX_MSG_TYPE_REQ &&
        msg->hdr.op_code != RDMACM_MUX_OP_CODE_MAD) {
        rdma_error_report("Error: Not a MAD request, skipping");
        return;
    }
    process_incoming_mad_req(backend_dev, msg);
}

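/*
 * Wire up the chardev that connects the device to the external rdmacm-mux
 * MAD multiplexer and prepare the queue of guest receive buffers.
 */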
static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
{
    int ret;

    backend_dev->rdmacm_mux.chr_be = mad_chr_be;

    ret = qemu_chr_fe_backend_connected(backend_dev->rdmacm_mux.chr_be);
    if (!ret) {
        rdma_error_report("Missing chardev for MAD multiplexer");
        return -EIO;
    }

    rdma_protected_qlist_init(&backend_dev->recv_mads_list);

    enable_rdmacm_mux_async(backend_dev);

    qemu_chr_fe_set_handlers(backend_dev->rdmacm_mux.chr_be,
                             rdmacm_mux_can_receive, rdmacm_mux_read, NULL,
                             NULL, backend_dev, NULL, true);

    return 0;
}

static void mad_stop(RdmaBackendDev *backend_dev)
{
    clean_recv_mads(backend_dev);
}

static void mad_fini(RdmaBackendDev *backend_dev)
{
    disable_rdmacm_mux_async(backend_dev);
    qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
    rdma_protected_qlist_destroy(&backend_dev->recv_mads_list);
}

int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,
                               union ibv_gid *gid)
{
    union ibv_gid sgid;
    int ret;
    int i = 0;

    do {
        ret = ibv_query_gid(backend_dev->context, backend_dev->port_num, i,
                            &sgid);
        i++;
    } while (!ret && (memcmp(&sgid, gid, sizeof(*gid))));

    trace_rdma_backend_get_gid_index(be64_to_cpu(gid->global.subnet_prefix),
                                     be64_to_cpu(gid->global.interface_id),
                                     i - 1);

    return ret ? ret : i - 1;
}

int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("add", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_REG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to register GID to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, true,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return 0;
}

int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("del", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_UNREG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to unregister GID from rdma_umadmux (%d)",
                          ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, false,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return 0;
}

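/*
 * Open the backing uverbs device, create its completion channel, clamp the
 * advertised capabilities and connect to the MAD multiplexer. Called once
 * when the emulated device is realized.
 */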
int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev,
                      RdmaDeviceResources *rdma_dev_res,
                      const char *backend_device_name, uint8_t port_num,
                      struct ibv_device_attr *dev_attr, CharBackend *mad_chr_be)
{
    int i;
    int ret = 0;
    int num_ibv_devices;
    struct ibv_device **dev_list;

    memset(backend_dev, 0, sizeof(*backend_dev));

    backend_dev->dev = pdev;
    backend_dev->port_num = port_num;
    backend_dev->rdma_dev_res = rdma_dev_res;

    rdma_backend_register_comp_handler(dummy_comp_handler);

    dev_list = ibv_get_device_list(&num_ibv_devices);
    if (!dev_list) {
        rdma_error_report("Failed to get IB devices list");
        return -EIO;
    }

    if (num_ibv_devices == 0) {
        rdma_error_report("No IB devices were found");
        ret = -ENXIO;
        goto out_free_dev_list;
    }

    if (backend_device_name) {
        for (i = 0; dev_list[i]; ++i) {
            if (!strcmp(ibv_get_device_name(dev_list[i]),
                        backend_device_name)) {
                break;
            }
        }

        backend_dev->ib_dev = dev_list[i];
        if (!backend_dev->ib_dev) {
            rdma_error_report("Failed to find IB device %s",
                              backend_device_name);
            ret = -EIO;
            goto out_free_dev_list;
        }
    } else {
        backend_dev->ib_dev = *dev_list;
    }

    rdma_info_report("uverb device %s", backend_dev->ib_dev->dev_name);

    backend_dev->context = ibv_open_device(backend_dev->ib_dev);
    if (!backend_dev->context) {
        rdma_error_report("Failed to open IB device %s",
                          ibv_get_device_name(backend_dev->ib_dev));
        ret = -EIO;
        goto out_free_dev_list;
    }

    backend_dev->channel = ibv_create_comp_channel(backend_dev->context);
    if (!backend_dev->channel) {
        rdma_error_report("Failed to create IB communication channel");
        ret = -EIO;
        goto out_close_device;
    }

    ret = init_device_caps(backend_dev, dev_attr);
    if (ret) {
        rdma_error_report("Failed to initialize device capabilities");
        ret = -EIO;
        goto out_destroy_comm_channel;
    }

    ret = mad_init(backend_dev, mad_chr_be);
    if (ret) {
        rdma_error_report("Failed to initialize mad");
        ret = -EIO;
        goto out_destroy_comm_channel;
    }

    backend_dev->comp_thread.run = false;
    backend_dev->comp_thread.is_running = false;

    ah_cache_init();

    goto out_free_dev_list;

out_destroy_comm_channel:
    ibv_destroy_comp_channel(backend_dev->channel);

out_close_device:
    ibv_close_device(backend_dev->context);

out_free_dev_list:
    ibv_free_device_list(dev_list);

    return ret;
}

void rdma_backend_start(RdmaBackendDev *backend_dev)
{
    start_comp_thread(backend_dev);
}

void rdma_backend_stop(RdmaBackendDev *backend_dev)
{
    mad_stop(backend_dev);
    stop_backend_thread(&backend_dev->comp_thread);
}

void rdma_backend_fini(RdmaBackendDev *backend_dev)
{
    mad_fini(backend_dev);
    g_hash_table_destroy(ah_hash);
    ibv_destroy_comp_channel(backend_dev->channel);
    ibv_close_device(backend_dev->context);
}