/*
 * QEMU paravirtual RDMA - Generic RDMA backend
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qapi/qapi-events-rdma.h"

#include <infiniband/verbs.h>

#include "contrib/rdmacm-mux/rdmacm-mux.h"
#include "trace.h"
#include "rdma_utils.h"
#include "rdma_rm.h"
#include "rdma_backend.h"

#define THR_NAME_LEN 16
#define THR_POLL_TO 5000

#define MAD_HDR_SIZE sizeof(struct ibv_grh)
typedef struct BackendCtx {
    void *up_ctx;
    struct ibv_sge sge; /* Used to save MAD recv buffer */
    RdmaBackendQP *backend_qp; /* To maintain recv buffers */
    RdmaBackendSRQ *backend_srq;
} BackendCtx;

struct backend_umad {
    struct ib_user_mad hdr;
    char mad[RDMA_MAX_PRIVATE_DATA];
};
static void (*comp_handler)(void *ctx, struct ibv_wc *wc);

static void dummy_comp_handler(void *ctx, struct ibv_wc *wc)
{
    rdma_error_report("No completion handler is registered");
}
static inline void complete_work(enum ibv_wc_status status, uint32_t vendor_err,
                                 void *ctx)
{
    struct ibv_wc wc = {};

    wc.status = status;
    wc.vendor_err = vendor_err;

    comp_handler(ctx, &wc);
}
static void free_cqe_ctx(gpointer data, gpointer user_data)
{
    BackendCtx *bctx;
    RdmaDeviceResources *rdma_dev_res = user_data;
    unsigned long cqe_ctx_id = GPOINTER_TO_INT(data);

    bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, cqe_ctx_id);
    if (bctx) {
        rdma_rm_dealloc_cqe_ctx(rdma_dev_res, cqe_ctx_id);
        atomic_dec(&rdma_dev_res->stats.missing_cqe);
    }
    g_free(bctx);
}
static void clean_recv_mads(RdmaBackendDev *backend_dev)
{
    unsigned long cqe_ctx_id;

    do {
        cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->
                                                    recv_mads_list);
        if (cqe_ctx_id != -ENOENT) {
            atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
            free_cqe_ctx(GINT_TO_POINTER(cqe_ctx_id),
                         backend_dev->rdma_dev_res);
        }
    } while (cqe_ctx_id != -ENOENT);
}
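/*
 * Drain all completions currently available on the ibverbs CQ and hand each
 * one to the registered completion handler. The BackendCtx stored at post
 * time (looked up by wr_id) is removed from its QP/SRQ context list and
 * freed. Returns the number of completions processed.
 */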
static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
{
    int i, ne, total_ne = 0;
    BackendCtx *bctx;
    struct ibv_wc wc[2];
    RdmaProtectedGSList *cqe_ctx_list;

    qemu_mutex_lock(&rdma_dev_res->lock);
    do {
        ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);

        trace_rdma_poll_cq(ne, ibcq);

        for (i = 0; i < ne; i++) {
            bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
            if (unlikely(!bctx)) {
                rdma_error_report("No matching ctx for req %"PRId64,
                                  wc[i].wr_id);
                continue;
            }

            comp_handler(bctx->up_ctx, &wc[i]);

            if (bctx->backend_qp) {
                cqe_ctx_list = &bctx->backend_qp->cqe_ctx_list;
            } else {
                cqe_ctx_list = &bctx->backend_srq->cqe_ctx_list;
            }

            rdma_protected_gslist_remove_int32(cqe_ctx_list, wc[i].wr_id);
            rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
            g_free(bctx);
        }
        total_ne += ne;
    } while (ne > 0);
    atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
    qemu_mutex_unlock(&rdma_dev_res->lock);

    if (ne < 0) {
        rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
    }

    rdma_dev_res->stats.completions += total_ne;

    return total_ne;
}
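/*
 * Completion-channel polling thread: waits for CQ events on the backend
 * completion channel (with a timeout so that comp_thread.run can be
 * re-checked), re-arms CQ notification and polls the signalled CQ.
 */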
static void *comp_handler_thread(void *arg)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)arg;
    int rc;
    struct ibv_cq *ev_cq;
    void *ev_ctx;
    int flags;
    GPollFD pfds[1];

    /* Change to non-blocking mode */
    flags = fcntl(backend_dev->channel->fd, F_GETFL);
    rc = fcntl(backend_dev->channel->fd, F_SETFL, flags | O_NONBLOCK);
    if (rc < 0) {
        rdma_error_report("Failed to change backend channel FD to non-blocking");
        return NULL;
    }

    pfds[0].fd = backend_dev->channel->fd;
    pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;

    backend_dev->comp_thread.is_running = true;

    while (backend_dev->comp_thread.run) {
        do {
            rc = qemu_poll_ns(pfds, 1, THR_POLL_TO * (int64_t)SCALE_MS);
            if (!rc) {
                backend_dev->rdma_dev_res->stats.poll_cq_ppoll_to++;
            }
        } while (!rc && backend_dev->comp_thread.run);

        if (backend_dev->comp_thread.run) {
            rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx);
            if (unlikely(rc)) {
                rdma_error_report("ibv_get_cq_event fail, rc=%d, errno=%d", rc,
                                  errno);
                continue;
            }

            rc = ibv_req_notify_cq(ev_cq, 0);
            if (unlikely(rc)) {
                rdma_error_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc,
                                  errno);
            }

            backend_dev->rdma_dev_res->stats.poll_cq_from_bk++;
            rdma_poll_cq(backend_dev->rdma_dev_res, ev_cq);

            ibv_ack_cq_events(ev_cq, 1);
        }
    }

    backend_dev->comp_thread.is_running = false;

    return NULL;
}
static inline void disable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    atomic_set(&backend_dev->rdmacm_mux.can_receive, 0);
}

static inline void enable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    atomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg));
}

static inline int rdmacm_mux_can_process_async(RdmaBackendDev *backend_dev)
{
    return atomic_read(&backend_dev->rdmacm_mux.can_receive);
}
static int rdmacm_mux_check_op_status(CharBackend *mad_chr_be)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    ret = qemu_chr_fe_read_all(mad_chr_be, (uint8_t *)&msg, sizeof(msg));
    if (ret != sizeof(msg)) {
        rdma_error_report("Got invalid message from mux: size %d, expecting %d",
                          ret, (int)sizeof(msg));
        return -EIO;
    }

    trace_rdmacm_mux_check_op_status(msg.hdr.msg_type, msg.hdr.op_code,
                                     msg.hdr.err_code);

    if (msg.hdr.msg_type != RDMACM_MUX_MSG_TYPE_RESP) {
        rdma_error_report("Got invalid message type %d", msg.hdr.msg_type);
        return -EIO;
    }

    if (msg.hdr.err_code != RDMACM_MUX_ERR_CODE_OK) {
        rdma_error_report("Operation failed in mux, error code %d",
                          msg.hdr.err_code);
        return -EIO;
    }

    return 0;
}
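/*
 * Send a request message to the rdmacm-mux service over the chardev and
 * synchronously wait for its status response; async reads from the mux are
 * disabled for the duration of the exchange.
 */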
static int rdmacm_mux_send(RdmaBackendDev *backend_dev, RdmaCmMuxMsg *msg)
{
    int rc = 0;

    msg->hdr.msg_type = RDMACM_MUX_MSG_TYPE_REQ;
    trace_rdmacm_mux("send", msg->hdr.msg_type, msg->hdr.op_code);
    disable_rdmacm_mux_async(backend_dev);
    rc = qemu_chr_fe_write(backend_dev->rdmacm_mux.chr_be,
                           (const uint8_t *)msg, sizeof(*msg));
    if (rc != sizeof(*msg)) {
        enable_rdmacm_mux_async(backend_dev);
        rdma_error_report("Failed to send request to rdmacm_mux (rc=%d)", rc);
        return -EIO;
    }

    rc = rdmacm_mux_check_op_status(backend_dev->rdmacm_mux.chr_be);
    if (rc) {
        rdma_error_report("Failed to execute rdmacm_mux request %d (rc=%d)",
                          msg->hdr.op_code, rc);
    }

    enable_rdmacm_mux_async(backend_dev);

    return 0;
}
static void stop_backend_thread(RdmaBackendThread *thread)
{
    thread->run = false;
    while (thread->is_running) {
        sleep(THR_POLL_TO / SCALE_US / 2);
    }
}
static void start_comp_thread(RdmaBackendDev *backend_dev)
{
    char thread_name[THR_NAME_LEN] = {};

    stop_backend_thread(&backend_dev->comp_thread);

    snprintf(thread_name, sizeof(thread_name), "rdma_comp_%s",
             ibv_get_device_name(backend_dev->ib_dev));
    backend_dev->comp_thread.run = true;
    qemu_thread_create(&backend_dev->comp_thread.thread, thread_name,
                       comp_handler_thread, backend_dev, QEMU_THREAD_DETACHED);
}
void rdma_backend_register_comp_handler(void (*handler)(void *ctx,
                                                        struct ibv_wc *wc))
{
    comp_handler = handler;
}

void rdma_backend_unregister_comp_handler(void)
{
    rdma_backend_register_comp_handler(dummy_comp_handler);
}
int rdma_backend_query_port(RdmaBackendDev *backend_dev,
                            struct ibv_port_attr *port_attr)
{
    int rc;

    rc = ibv_query_port(backend_dev->context, backend_dev->port_num, port_attr);
    if (rc) {
        rdma_error_report("ibv_query_port fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}
void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq)
{
    int polled;

    rdma_dev_res->stats.poll_cq_from_guest++;
    polled = rdma_poll_cq(rdma_dev_res, cq->ibcq);
    if (!polled) {
        rdma_dev_res->stats.poll_cq_from_guest_empty++;
    }
}
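/*
 * Address handles are cached in ah_hash, keyed by destination GID, so that
 * repeated UD sends to the same destination do not recreate the AH.
 */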
static GHashTable *ah_hash;

static struct ibv_ah *create_ah(RdmaBackendDev *backend_dev, struct ibv_pd *pd,
                                uint8_t sgid_idx, union ibv_gid *dgid)
{
    GBytes *ah_key = g_bytes_new(dgid, sizeof(*dgid));
    struct ibv_ah *ah = g_hash_table_lookup(ah_hash, ah_key);

    if (ah) {
        trace_rdma_create_ah_cache_hit(be64_to_cpu(dgid->global.subnet_prefix),
                                       be64_to_cpu(dgid->global.interface_id));
        g_bytes_unref(ah_key);
    } else {
        struct ibv_ah_attr ah_attr = {
            .is_global     = 1,
            .port_num      = backend_dev->port_num,
            .grh.hop_limit = 1,
        };

        ah_attr.grh.dgid = *dgid;
        ah_attr.grh.sgid_index = sgid_idx;

        ah = ibv_create_ah(pd, &ah_attr);
        if (ah) {
            g_hash_table_insert(ah_hash, ah_key, ah);
        } else {
            g_bytes_unref(ah_key);
            rdma_error_report("Failed to create AH for gid <0x%" PRIx64", 0x%"PRIx64">",
                              be64_to_cpu(dgid->global.subnet_prefix),
                              be64_to_cpu(dgid->global.interface_id));
        }

        trace_rdma_create_ah_cache_miss(be64_to_cpu(dgid->global.subnet_prefix),
                                        be64_to_cpu(dgid->global.interface_id));
    }

    return ah;
}
static void destroy_ah_hash_key(gpointer data)
{
    g_bytes_unref(data);
}

static void destroy_ah_hast_data(gpointer data)
{
    struct ibv_ah *ah = data;

    ibv_destroy_ah(ah);
}

static void ah_cache_init(void)
{
    ah_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal,
                                    destroy_ah_hash_key, destroy_ah_hast_data);
}
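/*
 * Translate a guest scatter/gather list into one usable by the host ibverbs
 * device: each entry's lkey is resolved to the backend MR and replaced with
 * the backend lkey (and, for legacy registration, the host virtual address).
 */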
static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res,
                                struct ibv_sge *dsge, struct ibv_sge *ssge,
                                uint8_t num_sge, uint64_t *total_length)
{
    RdmaRmMR *mr;
    int ssge_idx;

    for (ssge_idx = 0; ssge_idx < num_sge; ssge_idx++) {
        mr = rdma_rm_get_mr(rdma_dev_res, ssge[ssge_idx].lkey);
        if (unlikely(!mr)) {
            rdma_error_report("Invalid lkey 0x%x", ssge[ssge_idx].lkey);
            return VENDOR_ERR_INVLKEY | ssge[ssge_idx].lkey;
        }

#ifdef LEGACY_RDMA_REG_MR
        dsge->addr = (uintptr_t)mr->virt + ssge[ssge_idx].addr - mr->start;
#else
        dsge->addr = ssge[ssge_idx].addr;
#endif
        dsge->length = ssge[ssge_idx].length;
        dsge->lkey = rdma_backend_mr_lkey(&mr->backend_mr);

        *total_length += dsge->length;

        dsge++;
    }

    return 0;
}
static void trace_mad_message(const char *title, char *buf, int len)
{
    int i;
    char *b = g_malloc0(len * 3 + 1);
    char b1[4];

    for (i = 0; i < len; i++) {
        sprintf(b1, "%.2X ", buf[i] & 0x000000FF);
        strcat(b, b1);
    }

    trace_rdma_mad_message(title, len, b);

    g_free(b);
}
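/*
 * Forward an outgoing MAD to the rdmacm-mux service: the two guest SGEs
 * (MAD header and payload) are DMA-mapped, copied into a single umad
 * message and sent over the mux chardev.
 */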
static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx,
                    union ibv_gid *sgid, struct ibv_sge *sge, uint32_t num_sge)
{
    RdmaCmMuxMsg msg = {};
    char *hdr, *data;
    int ret;

    if (num_sge != 2) {
        return -EINVAL;
    }

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_MAD;
    memcpy(msg.hdr.sgid.raw, sgid->raw, sizeof(msg.hdr.sgid));

    msg.umad_len = sge[0].length + sge[1].length;

    if (msg.umad_len > sizeof(msg.umad.mad)) {
        return -ENOMEM;
    }

    msg.umad.hdr.addr.qpn = htobe32(1);
    msg.umad.hdr.addr.grh_present = 1;
    msg.umad.hdr.addr.gid_index = sgid_idx;
    memcpy(msg.umad.hdr.addr.gid, sgid->raw, sizeof(msg.umad.hdr.addr.gid));
    msg.umad.hdr.addr.hop_limit = 0xFF;

    hdr = rdma_pci_dma_map(backend_dev->dev, sge[0].addr, sge[0].length);
    if (!hdr) {
        return -ENOMEM;
    }
    data = rdma_pci_dma_map(backend_dev->dev, sge[1].addr, sge[1].length);
    if (!data) {
        rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);
        return -ENOMEM;
    }

    memcpy(&msg.umad.mad[0], hdr, sge[0].length);
    memcpy(&msg.umad.mad[sge[0].length], data, sge[1].length);

    rdma_pci_dma_unmap(backend_dev->dev, data, sge[1].length);
    rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);

    trace_mad_message("send", msg.umad.mad, msg.umad_len);

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to send MAD to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    return 0;
}
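/*
 * Post a send work request on the backend QP. QP0 is rejected and QP1 (GSI)
 * traffic is diverted to the MAD path; for real QPs a BackendCtx is
 * allocated, its id is used as wr_id, and the guest SGE list is translated
 * before calling ibv_post_send().
 */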
void rdma_backend_post_send(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge,
                            uint8_t sgid_idx, union ibv_gid *sgid,
                            union ibv_gid *dgid, uint32_t dqpn, uint32_t dqkey,
                            void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_send_wr wr = {}, *bad_wr;

    if (!qp->ibqp) { /* This field is not initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        } else if (qp_type == IBV_QPT_GSI) {
            rc = mad_send(backend_dev, sgid_idx, sgid, sge, num_sge);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_MAD_SEND, ctx);
                backend_dev->rdma_dev_res->stats.mad_tx_err++;
            } else {
                complete_work(IBV_WC_SUCCESS, 0, ctx);
                backend_dev->rdma_dev_res->stats.mad_tx++;
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_qp = qp;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.tx_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    if (qp_type == IBV_QPT_UD) {
        wr.wr.ud.ah = create_ah(backend_dev, qp->ibpd, sgid_idx, dgid);
        if (!wr.wr.ud.ah) {
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
            goto err_dealloc_cqe_ctx;
        }
        wr.wr.ud.remote_qpn = dqpn;
        wr.wr.ud.remote_qkey = dqkey;
    }

    wr.num_sge = num_sge;
    wr.opcode = IBV_WR_SEND;
    wr.send_flags = IBV_SEND_SIGNALED;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;

    rc = ibv_post_send(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_send fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.tx++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.tx_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}
static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
                                         struct ibv_sge *sge, uint32_t num_sge,
                                         void *ctx)
{
    BackendCtx *bctx;
    int rc;
    uint32_t bctx_id;

    if (num_sge != 1) {
        rdma_error_report("Invalid num_sge (%d), expecting 1", num_sge);
        return VENDOR_ERR_INV_NUM_SGE;
    }

    if (sge[0].length < RDMA_MAX_PRIVATE_DATA + sizeof(struct ibv_grh)) {
        rdma_error_report("Too small buffer for MAD");
        return VENDOR_ERR_INV_MAD_BUFF;
    }

    bctx = g_malloc0(sizeof(*bctx));

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        g_free(bctx);
        return VENDOR_ERR_NOMEM;
    }

    bctx->up_ctx = ctx;
    bctx->sge = *sge;

    rdma_protected_qlist_append_int64(&backend_dev->recv_mads_list, bctx_id);

    return 0;
}
void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge, void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_recv_wr wr = {}, *bad_wr;

    if (!qp->ibqp) { /* This field does not get initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        }
        if (qp_type == IBV_QPT_GSI) {
            rc = save_mad_recv_buffer(backend_dev, sge, num_sge, ctx);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
                backend_dev->rdma_dev_res->stats.mad_rx_bufs_err++;
            } else {
                backend_dev->rdma_dev_res->stats.mad_rx_bufs++;
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_qp = qp;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.rx_bufs_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    wr.num_sge = num_sge;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;
    rc = ibv_post_recv(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_recv fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.rx_bufs++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.rx_bufs_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}
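/*
 * Post a receive work request on a shared receive queue; same flow as
 * rdma_backend_post_recv() but the BackendCtx is tracked on the SRQ's
 * cqe_ctx_list and the rx_srq statistic is updated.
 */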
void rdma_backend_post_srq_recv(RdmaBackendDev *backend_dev,
                                RdmaBackendSRQ *srq, struct ibv_sge *sge,
                                uint32_t num_sge, void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_recv_wr wr = {}, *bad_wr;

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_srq = srq;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&srq->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.rx_bufs_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    wr.num_sge = num_sge;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;
    rc = ibv_post_srq_recv(srq->ibsrq, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_srq_recv fail, srqn=0x%x, rc=%d, errno=%d",
                          srq->ibsrq->handle, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.rx_bufs++;
    backend_dev->rdma_dev_res->stats.rx_srq++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.rx_bufs_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}
int rdma_backend_create_pd(RdmaBackendDev *backend_dev, RdmaBackendPD *pd)
{
    pd->ibpd = ibv_alloc_pd(backend_dev->context);

    if (!pd->ibpd) {
        rdma_error_report("ibv_alloc_pd fail, errno=%d", errno);
        return -EIO;
    }

    return 0;
}

void rdma_backend_destroy_pd(RdmaBackendPD *pd)
{
    if (pd->ibpd) {
        ibv_dealloc_pd(pd->ibpd);
    }
}
#ifdef LEGACY_RDMA_REG_MR
int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr,
                           size_t length, int access)
#else
int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr,
                           size_t length, uint64_t guest_start, int access)
#endif
{
#ifdef LEGACY_RDMA_REG_MR
    mr->ibmr = ibv_reg_mr(pd->ibpd, addr, length, access);
#else
    mr->ibmr = ibv_reg_mr_iova(pd->ibpd, addr, length, guest_start, access);
#endif
    if (!mr->ibmr) {
        rdma_error_report("ibv_reg_mr fail, errno=%d", errno);
        return -EIO;
    }

    mr->ibpd = pd->ibpd;

    return 0;
}

void rdma_backend_destroy_mr(RdmaBackendMR *mr)
{
    if (mr->ibmr) {
        ibv_dereg_mr(mr->ibmr);
    }
}
int rdma_backend_create_cq(RdmaBackendDev *backend_dev, RdmaBackendCQ *cq,
                           int cqe)
{
    int rc;

    cq->ibcq = ibv_create_cq(backend_dev->context, cqe + 1, NULL,
                             backend_dev->channel, 0);
    if (!cq->ibcq) {
        rdma_error_report("ibv_create_cq fail, errno=%d", errno);
        return -EIO;
    }

    rc = ibv_req_notify_cq(cq->ibcq, 0);
    if (rc) {
        rdma_warn_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc, errno);
    }

    cq->backend_dev = backend_dev;

    return 0;
}

void rdma_backend_destroy_cq(RdmaBackendCQ *cq)
{
    if (cq->ibcq) {
        ibv_destroy_cq(cq->ibcq);
    }
}
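/*
 * Create the backend ibverbs QP. GSI QPs are handled in software (no ibqp
 * is created); only RC and UD types are passed through to ibv_create_qp().
 */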
int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
                           RdmaBackendPD *pd, RdmaBackendCQ *scq,
                           RdmaBackendCQ *rcq, RdmaBackendSRQ *srq,
                           uint32_t max_send_wr, uint32_t max_recv_wr,
                           uint32_t max_send_sge, uint32_t max_recv_sge)
{
    struct ibv_qp_init_attr attr = {};

    qp->ibqp = 0;

    switch (qp_type) {
    case IBV_QPT_GSI:
        return 0;

    case IBV_QPT_RC:
        /* fall through */
    case IBV_QPT_UD:
        /* do nothing */
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    attr.qp_type = qp_type;
    attr.send_cq = scq->ibcq;
    attr.recv_cq = rcq->ibcq;
    attr.cap.max_send_wr = max_send_wr;
    attr.cap.max_recv_wr = max_recv_wr;
    attr.cap.max_send_sge = max_send_sge;
    attr.cap.max_recv_sge = max_recv_sge;
    if (srq) {
        attr.srq = srq->ibsrq;
    }

    qp->ibqp = ibv_create_qp(pd->ibpd, &attr);
    if (!qp->ibqp) {
        rdma_error_report("ibv_create_qp fail, errno=%d", errno);
        return -EIO;
    }

    rdma_protected_gslist_init(&qp->cqe_ctx_list);

    qp->ibpd = pd->ibpd;

    /* TODO: Query QP to get max_inline_data and save it to be used in send */

    return 0;
}
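/*
 * The three functions below drive the RESET->INIT->RTR->RTS transitions of
 * the backend QP with ibv_modify_qp(), mirroring the state changes
 * requested by the guest driver.
 */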
int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                               uint8_t qp_type, uint32_t qkey)
{
    struct ibv_qp_attr attr = {};
    int rc, attr_mask;

    attr_mask = IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT;
    attr.qp_state = IBV_QPS_INIT;
    attr.pkey_index = 0;
    attr.port_num = backend_dev->port_num;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr_mask |= IBV_QP_ACCESS_FLAGS;
        trace_rdma_backend_rc_qp_state_init(qp->ibqp->qp_num);
        break;

    case IBV_QPT_UD:
        attr.qkey = qkey;
        attr_mask |= IBV_QP_QKEY;
        trace_rdma_backend_ud_qp_state_init(qp->ibqp->qp_num, qkey);
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}
int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                              uint8_t qp_type, uint8_t sgid_idx,
                              union ibv_gid *dgid, uint32_t dqpn,
                              uint32_t rq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {};
    union ibv_gid ibv_gid = {
        .global.interface_id = dgid->global.interface_id,
        .global.subnet_prefix = dgid->global.subnet_prefix
    };
    int rc, attr_mask;

    attr.qp_state = IBV_QPS_RTR;
    attr_mask = IBV_QP_STATE;

    qp->sgid_idx = sgid_idx;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr.path_mtu = IBV_MTU_1024;
        attr.dest_qp_num = dqpn;
        attr.max_dest_rd_atomic = 1;
        attr.min_rnr_timer = 12;
        attr.ah_attr.port_num = backend_dev->port_num;
        attr.ah_attr.is_global = 1;
        attr.ah_attr.grh.hop_limit = 1;
        attr.ah_attr.grh.dgid = ibv_gid;
        attr.ah_attr.grh.sgid_index = qp->sgid_idx;
        attr.rq_psn = rq_psn;

        attr_mask |= IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN |
                     IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC |
                     IBV_QP_MIN_RNR_TIMER;

        trace_rdma_backend_rc_qp_state_rtr(qp->ibqp->qp_num,
                                           be64_to_cpu(ibv_gid.global.
                                                       subnet_prefix),
                                           be64_to_cpu(ibv_gid.global.
                                                       interface_id),
                                           qp->sgid_idx, dqpn, rq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rtr(qp->ibqp->qp_num, use_qkey ? qkey :
                                           0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}
*qp
, uint8_t qp_type
,
951 uint32_t sq_psn
, uint32_t qkey
, bool use_qkey
)
953 struct ibv_qp_attr attr
= {};
956 attr
.qp_state
= IBV_QPS_RTS
;
957 attr
.sq_psn
= sq_psn
;
958 attr_mask
= IBV_QP_STATE
| IBV_QP_SQ_PSN
;
965 attr
.max_rd_atomic
= 1;
967 attr_mask
|= IBV_QP_TIMEOUT
| IBV_QP_RETRY_CNT
| IBV_QP_RNR_RETRY
|
968 IBV_QP_MAX_QP_RD_ATOMIC
;
969 trace_rdma_backend_rc_qp_state_rts(qp
->ibqp
->qp_num
, sq_psn
);
975 attr_mask
|= IBV_QP_QKEY
;
977 trace_rdma_backend_ud_qp_state_rts(qp
->ibqp
->qp_num
, sq_psn
,
978 use_qkey
? qkey
: 0);
982 rc
= ibv_modify_qp(qp
->ibqp
, &attr
, attr_mask
);
984 rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc
, errno
);
int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr,
                          int attr_mask, struct ibv_qp_init_attr *init_attr)
{
    if (!qp->ibqp) {
        attr->qp_state = IBV_QPS_RTS;
        return 0;
    }

    return ibv_query_qp(qp->ibqp, attr, attr_mask, init_attr);
}

void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res)
{
    if (qp->ibqp) {
        ibv_destroy_qp(qp->ibqp);
    }
    g_slist_foreach(qp->cqe_ctx_list.list, free_cqe_ctx, dev_res);
    rdma_protected_gslist_destroy(&qp->cqe_ctx_list);
}
int rdma_backend_create_srq(RdmaBackendSRQ *srq, RdmaBackendPD *pd,
                            uint32_t max_wr, uint32_t max_sge,
                            uint32_t srq_limit)
{
    struct ibv_srq_init_attr srq_init_attr = {};

    srq_init_attr.attr.max_wr = max_wr;
    srq_init_attr.attr.max_sge = max_sge;
    srq_init_attr.attr.srq_limit = srq_limit;

    srq->ibsrq = ibv_create_srq(pd->ibpd, &srq_init_attr);
    if (!srq->ibsrq) {
        rdma_error_report("ibv_create_srq failed, errno=%d", errno);
        return -EIO;
    }

    rdma_protected_gslist_init(&srq->cqe_ctx_list);

    return 0;
}

int rdma_backend_query_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr)
{
    if (!srq->ibsrq) {
        return -EINVAL;
    }

    return ibv_query_srq(srq->ibsrq, srq_attr);
}

int rdma_backend_modify_srq(RdmaBackendSRQ *srq, struct ibv_srq_attr *srq_attr,
                            int srq_attr_mask)
{
    if (!srq->ibsrq) {
        return -EINVAL;
    }

    return ibv_modify_srq(srq->ibsrq, srq_attr, srq_attr_mask);
}

void rdma_backend_destroy_srq(RdmaBackendSRQ *srq, RdmaDeviceResources *dev_res)
{
    if (srq->ibsrq) {
        ibv_destroy_srq(srq->ibsrq);
    }
    g_slist_foreach(srq->cqe_ctx_list.list, free_cqe_ctx, dev_res);
    rdma_protected_gslist_destroy(&srq->cqe_ctx_list);
}
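/*
 * Clamp a guest-requested device attribute to the host device capability,
 * warning when the requested value has to be lowered.
 */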
#define CHK_ATTR(req, dev, member, fmt) ({ \
    trace_rdma_check_dev_attr(#member, dev.member, req->member); \
    if (req->member > dev.member) { \
        rdma_warn_report("%s = "fmt" is higher than host device capability "fmt, \
                         #member, req->member, dev.member); \
        req->member = dev.member; \
    } \
})
static int init_device_caps(RdmaBackendDev *backend_dev,
                            struct ibv_device_attr *dev_attr)
{
    struct ibv_device_attr bk_dev_attr;
    int rc;

    rc = ibv_query_device(backend_dev->context, &bk_dev_attr);
    if (rc) {
        rdma_error_report("ibv_query_device fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    dev_attr->max_sge = MAX_SGE;
    dev_attr->max_srq_sge = MAX_SGE;

    CHK_ATTR(dev_attr, bk_dev_attr, max_mr_size, "%" PRId64);
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_sge, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_cq, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_mr, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_pd, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_init_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_ah, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_srq, "%d");

    return 0;
}
static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid,
                                 union ibv_gid *my_gid, int paylen)
{
    grh->paylen = htons(paylen);
    grh->sgid = *sgid;
    grh->dgid = *my_gid;
}
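/*
 * Handle a MAD delivered by the rdmacm-mux: take a previously posted GSI
 * receive buffer from recv_mads_list, build a GRH in front of the payload
 * and complete the receive work request on the guest's behalf.
 */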
static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
                                     RdmaCmMuxMsg *msg)
{
    unsigned long cqe_ctx_id;
    BackendCtx *bctx;
    char *mad;

    trace_mad_message("recv", msg->umad.mad, msg->umad_len);

    cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->recv_mads_list);
    if (cqe_ctx_id == -ENOENT) {
        rdma_warn_report("No more free MADs buffers, waiting for a while");
        sleep(THR_POLL_TO);
        return;
    }

    bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
    if (unlikely(!bctx)) {
        rdma_error_report("No matching ctx for req %ld", cqe_ctx_id);
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        return;
    }

    mad = rdma_pci_dma_map(backend_dev->dev, bctx->sge.addr,
                           bctx->sge.length);
    if (!mad || bctx->sge.length < msg->umad_len + MAD_HDR_SIZE) {
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_INV_MAD_BUFF,
                      bctx->up_ctx);
    } else {
        struct ibv_wc wc = {};
        memset(mad, 0, bctx->sge.length);
        build_mad_hdr((struct ibv_grh *)mad,
                      (union ibv_gid *)&msg->umad.hdr.addr.gid, &msg->hdr.sgid,
                      msg->umad_len);
        memcpy(&mad[MAD_HDR_SIZE], msg->umad.mad, msg->umad_len);
        rdma_pci_dma_unmap(backend_dev->dev, mad, bctx->sge.length);

        wc.byte_len = msg->umad_len;
        wc.status = IBV_WC_SUCCESS;
        wc.wc_flags = IBV_WC_GRH;
        backend_dev->rdma_dev_res->stats.mad_rx++;
        comp_handler(bctx->up_ctx, &wc);
    }

    g_free(bctx);
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
}
static inline int rdmacm_mux_can_receive(void *opaque)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;

    return rdmacm_mux_can_process_async(backend_dev);
}

static void rdmacm_mux_read(void *opaque, const uint8_t *buf, int size)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;
    RdmaCmMuxMsg *msg = (RdmaCmMuxMsg *)buf;

    trace_rdmacm_mux("read", msg->hdr.msg_type, msg->hdr.op_code);

    if (msg->hdr.msg_type != RDMACM_MUX_MSG_TYPE_REQ &&
        msg->hdr.op_code != RDMACM_MUX_OP_CODE_MAD) {
        rdma_error_report("Error: Not a MAD request, skipping");
        return;
    }
    process_incoming_mad_req(backend_dev, msg);
}
static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
{
    int ret;

    backend_dev->rdmacm_mux.chr_be = mad_chr_be;

    ret = qemu_chr_fe_backend_connected(backend_dev->rdmacm_mux.chr_be);
    if (!ret) {
        rdma_error_report("Missing chardev for MAD multiplexer");
        return -EIO;
    }

    rdma_protected_qlist_init(&backend_dev->recv_mads_list);

    enable_rdmacm_mux_async(backend_dev);

    qemu_chr_fe_set_handlers(backend_dev->rdmacm_mux.chr_be,
                             rdmacm_mux_can_receive, rdmacm_mux_read, NULL,
                             NULL, backend_dev, NULL, true);

    return 0;
}
static void mad_stop(RdmaBackendDev *backend_dev)
{
    clean_recv_mads(backend_dev);
}

static void mad_fini(RdmaBackendDev *backend_dev)
{
    disable_rdmacm_mux_async(backend_dev);
    qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
    rdma_protected_qlist_destroy(&backend_dev->recv_mads_list);
}
int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,
                               union ibv_gid *gid)
{
    union ibv_gid sgid;
    int ret;
    int i = 0;

    do {
        ret = ibv_query_gid(backend_dev->context, backend_dev->port_num, i,
                            &sgid);
        i++;
    } while (!ret && (memcmp(&sgid, gid, sizeof(*gid))));

    trace_rdma_backend_get_gid_index(be64_to_cpu(gid->global.subnet_prefix),
                                     be64_to_cpu(gid->global.interface_id),
                                     i - 1);

    return ret ? ret : i - 1;
}
int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("add", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_REG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to register GID to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, true,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return 0;
}
int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("del", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_UNREG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to unregister GID from rdma_umadmux (%d)",
                          ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, false,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return 0;
}
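/*
 * Open the host ibverbs device (the named one, or the first one found),
 * create its completion channel, negotiate device capabilities and set up
 * the connection to the MAD multiplexer.
 */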
int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev,
                      RdmaDeviceResources *rdma_dev_res,
                      const char *backend_device_name, uint8_t port_num,
                      struct ibv_device_attr *dev_attr, CharBackend *mad_chr_be)
{
    int i;
    int ret = 0;
    int num_ibv_devices;
    struct ibv_device **dev_list;

    memset(backend_dev, 0, sizeof(*backend_dev));

    backend_dev->dev = pdev;
    backend_dev->port_num = port_num;
    backend_dev->rdma_dev_res = rdma_dev_res;

    rdma_backend_register_comp_handler(dummy_comp_handler);

    dev_list = ibv_get_device_list(&num_ibv_devices);
    if (!dev_list) {
        rdma_error_report("Failed to get IB devices list");
        return -EIO;
    }

    if (num_ibv_devices == 0) {
        rdma_error_report("No IB devices were found");
        ret = -ENXIO;
        goto out_free_dev_list;
    }

    if (backend_device_name) {
        for (i = 0; dev_list[i]; ++i) {
            if (!strcmp(ibv_get_device_name(dev_list[i]),
                        backend_device_name)) {
                break;
            }
        }

        backend_dev->ib_dev = dev_list[i];
        if (!backend_dev->ib_dev) {
            rdma_error_report("Failed to find IB device %s",
                              backend_device_name);
            ret = -EIO;
            goto out_free_dev_list;
        }
    } else {
        backend_dev->ib_dev = *dev_list;
    }

    rdma_info_report("uverb device %s", backend_dev->ib_dev->dev_name);

    backend_dev->context = ibv_open_device(backend_dev->ib_dev);
    if (!backend_dev->context) {
        rdma_error_report("Failed to open IB device %s",
                          ibv_get_device_name(backend_dev->ib_dev));
        ret = -EIO;
        goto out_free_dev_list;
    }

    backend_dev->channel = ibv_create_comp_channel(backend_dev->context);
    if (!backend_dev->channel) {
        rdma_error_report("Failed to create IB communication channel");
        ret = -EIO;
        goto out_close_device;
    }

    ret = init_device_caps(backend_dev, dev_attr);
    if (ret) {
        rdma_error_report("Failed to initialize device capabilities");
        ret = -EIO;
        goto out_destroy_comm_channel;
    }

    ret = mad_init(backend_dev, mad_chr_be);
    if (ret) {
        rdma_error_report("Failed to initialize mad");
        ret = -EIO;
        goto out_destroy_comm_channel;
    }

    backend_dev->comp_thread.run = false;
    backend_dev->comp_thread.is_running = false;

    ah_cache_init();

    goto out_free_dev_list;

out_destroy_comm_channel:
    ibv_destroy_comp_channel(backend_dev->channel);

out_close_device:
    ibv_close_device(backend_dev->context);

out_free_dev_list:
    ibv_free_device_list(dev_list);

    return ret;
}
void rdma_backend_start(RdmaBackendDev *backend_dev)
{
    start_comp_thread(backend_dev);
}

void rdma_backend_stop(RdmaBackendDev *backend_dev)
{
    mad_stop(backend_dev);
    stop_backend_thread(&backend_dev->comp_thread);
}

void rdma_backend_fini(RdmaBackendDev *backend_dev)
{
    mad_fini(backend_dev);
    g_hash_table_destroy(ah_hash);
    ibv_destroy_comp_channel(backend_dev->channel);
    ibv_close_device(backend_dev->context);
}