/*
 * QEMU paravirtual RDMA - Generic RDMA backend
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "sysemu/sysemu.h"
#include "qapi/error.h"
#include "qapi/qmp/qlist.h"
#include "qapi/qmp/qnum.h"
#include "qapi/qapi-events-rdma.h"

#include <infiniband/verbs.h>
#include <infiniband/umad_types.h>
#include <infiniband/umad.h>
#include <rdma/rdma_user_cm.h>

#include "contrib/rdmacm-mux/rdmacm-mux.h"
#include "trace.h"
#include "rdma_utils.h"
#include "rdma_rm.h"
#include "rdma_backend.h"

#define THR_NAME_LEN 16
#define THR_POLL_TO 5000

#define MAD_HDR_SIZE sizeof(struct ibv_grh)

typedef struct BackendCtx {
    void *up_ctx;
    struct ibv_sge sge; /* Used to save MAD recv buffer */
    RdmaBackendQP *backend_qp; /* To maintain recv buffers */
} BackendCtx;

struct backend_umad {
    struct ib_user_mad hdr;
    char mad[RDMA_MAX_PRIVATE_DATA];
};

static void (*comp_handler)(void *ctx, struct ibv_wc *wc);

static void dummy_comp_handler(void *ctx, struct ibv_wc *wc)
{
    rdma_error_report("No completion handler is registered");
}

static inline void complete_work(enum ibv_wc_status status, uint32_t vendor_err,
                                 void *ctx)
{
    struct ibv_wc wc = {};

    wc.status = status;
    wc.vendor_err = vendor_err;

    comp_handler(ctx, &wc);
}

static void free_cqe_ctx(gpointer data, gpointer user_data)
{
    BackendCtx *bctx;
    RdmaDeviceResources *rdma_dev_res = user_data;
    unsigned long cqe_ctx_id = GPOINTER_TO_INT(data);

    bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, cqe_ctx_id);
    if (bctx) {
        rdma_rm_dealloc_cqe_ctx(rdma_dev_res, cqe_ctx_id);
        atomic_dec(&rdma_dev_res->stats.missing_cqe);
    }
    g_free(bctx);
}

static void clean_recv_mads(RdmaBackendDev *backend_dev)
{
    unsigned long cqe_ctx_id;

    do {
        cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->
                                                    recv_mads_list);
        if (cqe_ctx_id != -ENOENT) {
            atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
            free_cqe_ctx(GINT_TO_POINTER(cqe_ctx_id),
                         backend_dev->rdma_dev_res);
        }
    } while (cqe_ctx_id != -ENOENT);
}

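/*
 * Drain all available completions from the host CQ and hand each one to the
 * registered completion handler, using wr_id to look up the guest context
 * that was saved when the work request was posted.
 */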
static int rdma_poll_cq(RdmaDeviceResources *rdma_dev_res, struct ibv_cq *ibcq)
{
    int i, ne, total_ne = 0;
    BackendCtx *bctx;
    struct ibv_wc wc[2];

    qemu_mutex_lock(&rdma_dev_res->lock);
    do {
        ne = ibv_poll_cq(ibcq, ARRAY_SIZE(wc), wc);

        trace_rdma_poll_cq(ne, ibcq);

        for (i = 0; i < ne; i++) {
            bctx = rdma_rm_get_cqe_ctx(rdma_dev_res, wc[i].wr_id);
            if (unlikely(!bctx)) {
                rdma_error_report("No matching ctx for req %"PRId64,
                                  wc[i].wr_id);
                continue;
            }

            comp_handler(bctx->up_ctx, &wc[i]);

            rdma_protected_gslist_remove_int32(&bctx->backend_qp->cqe_ctx_list,
                                               wc[i].wr_id);
            rdma_rm_dealloc_cqe_ctx(rdma_dev_res, wc[i].wr_id);
            g_free(bctx);
        }
        total_ne += ne;
    } while (ne > 0);
    atomic_sub(&rdma_dev_res->stats.missing_cqe, total_ne);
    qemu_mutex_unlock(&rdma_dev_res->lock);

    if (ne < 0) {
        rdma_error_report("ibv_poll_cq fail, rc=%d, errno=%d", ne, errno);
    }

    rdma_dev_res->stats.completions += total_ne;

    return total_ne;
}

static void *comp_handler_thread(void *arg)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)arg;
    int rc;
    struct ibv_cq *ev_cq;
    void *ev_ctx;
    int flags;
    GPollFD pfds[1];

    /* Change to non-blocking mode */
    flags = fcntl(backend_dev->channel->fd, F_GETFL);
    rc = fcntl(backend_dev->channel->fd, F_SETFL, flags | O_NONBLOCK);
    if (rc < 0) {
        rdma_error_report("Failed to change backend channel FD to non-blocking");
        return NULL;
    }

    pfds[0].fd = backend_dev->channel->fd;
    pfds[0].events = G_IO_IN | G_IO_HUP | G_IO_ERR;

    backend_dev->comp_thread.is_running = true;

    while (backend_dev->comp_thread.run) {
        do {
            rc = qemu_poll_ns(pfds, 1, THR_POLL_TO * (int64_t)SCALE_MS);
            if (!rc) {
                backend_dev->rdma_dev_res->stats.poll_cq_ppoll_to++;
            }
        } while (!rc && backend_dev->comp_thread.run);

        if (backend_dev->comp_thread.run) {
            rc = ibv_get_cq_event(backend_dev->channel, &ev_cq, &ev_ctx);
            if (unlikely(rc)) {
                rdma_error_report("ibv_get_cq_event fail, rc=%d, errno=%d", rc,
                                  errno);
                continue;
            }

            rc = ibv_req_notify_cq(ev_cq, 0);
            if (unlikely(rc)) {
                rdma_error_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc,
                                  errno);
            }

            backend_dev->rdma_dev_res->stats.poll_cq_from_bk++;
            rdma_poll_cq(backend_dev->rdma_dev_res, ev_cq);

            ibv_ack_cq_events(ev_cq, 1);
        }
    }

    backend_dev->comp_thread.is_running = false;

    qemu_thread_exit(0);

    return NULL;
}

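/*
 * The chardev connected to rdmacm-mux carries both asynchronous MAD traffic
 * and synchronous request/response exchanges.  While a synchronous request
 * is in flight the asynchronous receive path is disabled (can_receive
 * returns 0) so that the reply is read by rdmacm_mux_check_op_status rather
 * than by the async read handler.
 */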
static inline void disable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    atomic_set(&backend_dev->rdmacm_mux.can_receive, 0);
}

static inline void enable_rdmacm_mux_async(RdmaBackendDev *backend_dev)
{
    atomic_set(&backend_dev->rdmacm_mux.can_receive, sizeof(RdmaCmMuxMsg));
}

static inline int rdmacm_mux_can_process_async(RdmaBackendDev *backend_dev)
{
    return atomic_read(&backend_dev->rdmacm_mux.can_receive);
}

static int rdmacm_mux_check_op_status(CharBackend *mad_chr_be)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    ret = qemu_chr_fe_read_all(mad_chr_be, (uint8_t *)&msg, sizeof(msg));
    if (ret != sizeof(msg)) {
        rdma_error_report("Got invalid message from mux: size %d, expecting %d",
                          ret, (int)sizeof(msg));
        return -EIO;
    }

    trace_rdmacm_mux_check_op_status(msg.hdr.msg_type, msg.hdr.op_code,
                                     msg.hdr.err_code);

    if (msg.hdr.msg_type != RDMACM_MUX_MSG_TYPE_RESP) {
        rdma_error_report("Got invalid message type %d", msg.hdr.msg_type);
        return -EIO;
    }

    if (msg.hdr.err_code != RDMACM_MUX_ERR_CODE_OK) {
        rdma_error_report("Operation failed in mux, error code %d",
                          msg.hdr.err_code);
        return -EIO;
    }

    return 0;
}

static int rdmacm_mux_send(RdmaBackendDev *backend_dev, RdmaCmMuxMsg *msg)
{
    int rc = 0;

    msg->hdr.msg_type = RDMACM_MUX_MSG_TYPE_REQ;
    trace_rdmacm_mux("send", msg->hdr.msg_type, msg->hdr.op_code);
    disable_rdmacm_mux_async(backend_dev);
    rc = qemu_chr_fe_write(backend_dev->rdmacm_mux.chr_be,
                           (const uint8_t *)msg, sizeof(*msg));
    if (rc != sizeof(*msg)) {
        enable_rdmacm_mux_async(backend_dev);
        rdma_error_report("Failed to send request to rdmacm_mux (rc=%d)", rc);
        return -EIO;
    }

    rc = rdmacm_mux_check_op_status(backend_dev->rdmacm_mux.chr_be);
    if (rc) {
        rdma_error_report("Failed to execute rdmacm_mux request %d (rc=%d)",
                          msg->hdr.op_code, rc);
    }

    enable_rdmacm_mux_async(backend_dev);

    return 0;
}

static void stop_backend_thread(RdmaBackendThread *thread)
{
    thread->run = false;
    while (thread->is_running) {
        sleep(THR_POLL_TO / SCALE_US / 2);
    }
}

static void start_comp_thread(RdmaBackendDev *backend_dev)
{
    char thread_name[THR_NAME_LEN] = {};

    stop_backend_thread(&backend_dev->comp_thread);

    snprintf(thread_name, sizeof(thread_name), "rdma_comp_%s",
             ibv_get_device_name(backend_dev->ib_dev));
    backend_dev->comp_thread.run = true;
    qemu_thread_create(&backend_dev->comp_thread.thread, thread_name,
                       comp_handler_thread, backend_dev, QEMU_THREAD_DETACHED);
}

void rdma_backend_register_comp_handler(void (*handler)(void *ctx,
                                                        struct ibv_wc *wc))
{
    comp_handler = handler;
}

void rdma_backend_unregister_comp_handler(void)
{
    rdma_backend_register_comp_handler(dummy_comp_handler);
}

int rdma_backend_query_port(RdmaBackendDev *backend_dev,
                            struct ibv_port_attr *port_attr)
{
    int rc;

    rc = ibv_query_port(backend_dev->context, backend_dev->port_num, port_attr);
    if (rc) {
        rdma_error_report("ibv_query_port fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

void rdma_backend_poll_cq(RdmaDeviceResources *rdma_dev_res, RdmaBackendCQ *cq)
{
    int polled;

    rdma_dev_res->stats.poll_cq_from_guest++;
    polled = rdma_poll_cq(rdma_dev_res, cq->ibcq);
    if (!polled) {
        rdma_dev_res->stats.poll_cq_from_guest_empty++;
    }
}

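/*
 * Address handles for UD sends are cached in a hash table keyed by the
 * destination GID, so repeated sends to the same destination reuse the same
 * ibv_ah instead of creating a new one for every work request.
 */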
static GHashTable *ah_hash;

static struct ibv_ah *create_ah(RdmaBackendDev *backend_dev, struct ibv_pd *pd,
                                uint8_t sgid_idx, union ibv_gid *dgid)
{
    GBytes *ah_key = g_bytes_new(dgid, sizeof(*dgid));
    struct ibv_ah *ah = g_hash_table_lookup(ah_hash, ah_key);

    if (ah) {
        trace_rdma_create_ah_cache_hit(be64_to_cpu(dgid->global.subnet_prefix),
                                       be64_to_cpu(dgid->global.interface_id));
        g_bytes_unref(ah_key);
    } else {
        struct ibv_ah_attr ah_attr = {
            .is_global = 1,
            .port_num = backend_dev->port_num,
            .grh.hop_limit = 1,
        };

        ah_attr.grh.dgid = *dgid;
        ah_attr.grh.sgid_index = sgid_idx;

        ah = ibv_create_ah(pd, &ah_attr);
        if (ah) {
            g_hash_table_insert(ah_hash, ah_key, ah);
        } else {
            g_bytes_unref(ah_key);
            rdma_error_report("Failed to create AH for gid <0x%" PRIx64", 0x%"PRIx64">",
                              be64_to_cpu(dgid->global.subnet_prefix),
                              be64_to_cpu(dgid->global.interface_id));
        }

        trace_rdma_create_ah_cache_miss(be64_to_cpu(dgid->global.subnet_prefix),
                                        be64_to_cpu(dgid->global.interface_id));
    }

    return ah;
}

static void destroy_ah_hash_key(gpointer data)
{
    g_bytes_unref(data);
}

static void destroy_ah_hast_data(gpointer data)
{
    struct ibv_ah *ah = data;

    ibv_destroy_ah(ah);
}

static void ah_cache_init(void)
{
    ah_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal,
                                    destroy_ah_hash_key, destroy_ah_hast_data);
}

static int build_host_sge_array(RdmaDeviceResources *rdma_dev_res,
                                struct ibv_sge *dsge, struct ibv_sge *ssge,
                                uint8_t num_sge, uint64_t *total_length)
{
    RdmaRmMR *mr;
    int ssge_idx;

    for (ssge_idx = 0; ssge_idx < num_sge; ssge_idx++) {
        mr = rdma_rm_get_mr(rdma_dev_res, ssge[ssge_idx].lkey);
        if (unlikely(!mr)) {
            rdma_error_report("Invalid lkey 0x%x", ssge[ssge_idx].lkey);
            return VENDOR_ERR_INVLKEY | ssge[ssge_idx].lkey;
        }

        dsge->addr = (uintptr_t)mr->virt + ssge[ssge_idx].addr - mr->start;
        dsge->length = ssge[ssge_idx].length;
        dsge->lkey = rdma_backend_mr_lkey(&mr->backend_mr);

        *total_length += dsge->length;

        dsge++;
    }

    return 0;
}

static void trace_mad_message(const char *title, char *buf, int len)
{
    int i;
    char *b = g_malloc0(len * 3 + 1);
    char b1[4];

    for (i = 0; i < len; i++) {
        sprintf(b1, "%.2X ", buf[i] & 0x000000FF);
        strcat(b, b1);
    }

    trace_rdma_mad_message(title, len, b);

    g_free(b);
}

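/*
 * GSI (QP1) sends are not posted to a host QP.  Instead the MAD is
 * repackaged as an RdmaCmMuxMsg and forwarded to the external rdmacm-mux
 * service over the chardev: the two scatter/gather entries are copied
 * back-to-back into the umad payload of the message.
 */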
static int mad_send(RdmaBackendDev *backend_dev, uint8_t sgid_idx,
                    union ibv_gid *sgid, struct ibv_sge *sge, uint32_t num_sge)
{
    RdmaCmMuxMsg msg = {};
    char *hdr, *data;
    int ret;

    if (num_sge != 2) {
        return -EINVAL;
    }

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_MAD;
    memcpy(msg.hdr.sgid.raw, sgid->raw, sizeof(msg.hdr.sgid));

    msg.umad_len = sge[0].length + sge[1].length;

    if (msg.umad_len > sizeof(msg.umad.mad)) {
        return -ENOMEM;
    }

    msg.umad.hdr.addr.qpn = htobe32(1);
    msg.umad.hdr.addr.grh_present = 1;
    msg.umad.hdr.addr.gid_index = sgid_idx;
    memcpy(msg.umad.hdr.addr.gid, sgid->raw, sizeof(msg.umad.hdr.addr.gid));
    msg.umad.hdr.addr.hop_limit = 0xFF;

    hdr = rdma_pci_dma_map(backend_dev->dev, sge[0].addr, sge[0].length);
    if (!hdr) {
        return -ENOMEM;
    }
    data = rdma_pci_dma_map(backend_dev->dev, sge[1].addr, sge[1].length);
    if (!data) {
        rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);
        return -ENOMEM;
    }

    memcpy(&msg.umad.mad[0], hdr, sge[0].length);
    memcpy(&msg.umad.mad[sge[0].length], data, sge[1].length);

    rdma_pci_dma_unmap(backend_dev->dev, data, sge[1].length);
    rdma_pci_dma_unmap(backend_dev->dev, hdr, sge[0].length);

    trace_mad_message("send", msg.umad.mad, msg.umad_len);

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to send MAD to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    return 0;
}

void rdma_backend_post_send(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge,
                            uint8_t sgid_idx, union ibv_gid *sgid,
                            union ibv_gid *dgid, uint32_t dqpn, uint32_t dqkey,
                            void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_send_wr wr = {}, *bad_wr;

    if (!qp->ibqp) { /* This field is not initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        } else if (qp_type == IBV_QPT_GSI) {
            rc = mad_send(backend_dev, sgid_idx, sgid, sge, num_sge);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_MAD_SEND, ctx);
                backend_dev->rdma_dev_res->stats.mad_tx_err++;
            } else {
                complete_work(IBV_WC_SUCCESS, 0, ctx);
                backend_dev->rdma_dev_res->stats.mad_tx++;
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_qp = qp;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.tx_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    if (qp_type == IBV_QPT_UD) {
        wr.wr.ud.ah = create_ah(backend_dev, qp->ibpd, sgid_idx, dgid);
        if (!wr.wr.ud.ah) {
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
            goto err_dealloc_cqe_ctx;
        }
        wr.wr.ud.remote_qpn = dqpn;
        wr.wr.ud.remote_qkey = dqkey;
    }

    wr.num_sge = num_sge;
    wr.opcode = IBV_WR_SEND;
    wr.send_flags = IBV_SEND_SIGNALED;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;

    rc = ibv_post_send(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_send fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.tx++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.tx_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

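/*
 * There is no host QP to post GSI receive buffers to, so they are kept on
 * recv_mads_list and consumed later, when a MAD arrives from rdmacm-mux
 * (see process_incoming_mad_req).
 */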
static unsigned int save_mad_recv_buffer(RdmaBackendDev *backend_dev,
                                         struct ibv_sge *sge, uint32_t num_sge,
                                         void *ctx)
{
    BackendCtx *bctx;
    int rc;
    uint32_t bctx_id;

    if (num_sge != 1) {
        rdma_error_report("Invalid num_sge (%d), expecting 1", num_sge);
        return VENDOR_ERR_INV_NUM_SGE;
    }

    if (sge[0].length < RDMA_MAX_PRIVATE_DATA + sizeof(struct ibv_grh)) {
        rdma_error_report("Too small buffer for MAD");
        return VENDOR_ERR_INV_MAD_BUFF;
    }

    bctx = g_malloc0(sizeof(*bctx));

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        g_free(bctx);
        return VENDOR_ERR_NOMEM;
    }

    bctx->up_ctx = ctx;
    bctx->sge = *sge;

    rdma_protected_qlist_append_int64(&backend_dev->recv_mads_list, bctx_id);

    return 0;
}

void rdma_backend_post_recv(RdmaBackendDev *backend_dev,
                            RdmaBackendQP *qp, uint8_t qp_type,
                            struct ibv_sge *sge, uint32_t num_sge, void *ctx)
{
    BackendCtx *bctx;
    struct ibv_sge new_sge[MAX_SGE];
    uint32_t bctx_id;
    int rc;
    struct ibv_recv_wr wr = {}, *bad_wr;

    if (!qp->ibqp) { /* This field does not get initialized for QP0 and QP1 */
        if (qp_type == IBV_QPT_SMI) {
            rdma_error_report("Got QP0 request");
            complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_QP0, ctx);
        }
        if (qp_type == IBV_QPT_GSI) {
            rc = save_mad_recv_buffer(backend_dev, sge, num_sge, ctx);
            if (rc) {
                complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
                backend_dev->rdma_dev_res->stats.mad_rx_bufs_err++;
            } else {
                backend_dev->rdma_dev_res->stats.mad_rx_bufs++;
            }
        }
        return;
    }

    bctx = g_malloc0(sizeof(*bctx));
    bctx->up_ctx = ctx;
    bctx->backend_qp = qp;

    rc = rdma_rm_alloc_cqe_ctx(backend_dev->rdma_dev_res, &bctx_id, bctx);
    if (unlikely(rc)) {
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_NOMEM, ctx);
        goto err_free_bctx;
    }

    rdma_protected_gslist_append_int32(&qp->cqe_ctx_list, bctx_id);

    rc = build_host_sge_array(backend_dev->rdma_dev_res, new_sge, sge, num_sge,
                              &backend_dev->rdma_dev_res->stats.rx_bufs_len);
    if (rc) {
        complete_work(IBV_WC_GENERAL_ERR, rc, ctx);
        goto err_dealloc_cqe_ctx;
    }

    wr.num_sge = num_sge;
    wr.sg_list = new_sge;
    wr.wr_id = bctx_id;
    rc = ibv_post_recv(qp->ibqp, &wr, &bad_wr);
    if (rc) {
        rdma_error_report("ibv_post_recv fail, qpn=0x%x, rc=%d, errno=%d",
                          qp->ibqp->qp_num, rc, errno);
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_FAIL_BACKEND, ctx);
        goto err_dealloc_cqe_ctx;
    }

    atomic_inc(&backend_dev->rdma_dev_res->stats.missing_cqe);
    backend_dev->rdma_dev_res->stats.rx_bufs++;

    return;

err_dealloc_cqe_ctx:
    backend_dev->rdma_dev_res->stats.rx_bufs_err++;
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, bctx_id);

err_free_bctx:
    g_free(bctx);
}

int rdma_backend_create_pd(RdmaBackendDev *backend_dev, RdmaBackendPD *pd)
{
    pd->ibpd = ibv_alloc_pd(backend_dev->context);

    if (!pd->ibpd) {
        rdma_error_report("ibv_alloc_pd fail, errno=%d", errno);
        return -EIO;
    }

    return 0;
}

void rdma_backend_destroy_pd(RdmaBackendPD *pd)
{
    if (pd->ibpd) {
        ibv_dealloc_pd(pd->ibpd);
    }
}

int rdma_backend_create_mr(RdmaBackendMR *mr, RdmaBackendPD *pd, void *addr,
                           size_t length, int access)
{
    mr->ibmr = ibv_reg_mr(pd->ibpd, addr, length, access);
    if (!mr->ibmr) {
        rdma_error_report("ibv_reg_mr fail, errno=%d", errno);
        return -EIO;
    }

    mr->ibpd = pd->ibpd;

    return 0;
}

void rdma_backend_destroy_mr(RdmaBackendMR *mr)
{
    if (mr->ibmr) {
        ibv_dereg_mr(mr->ibmr);
    }
}

int rdma_backend_create_cq(RdmaBackendDev *backend_dev, RdmaBackendCQ *cq,
                           int cqe)
{
    int rc;

    cq->ibcq = ibv_create_cq(backend_dev->context, cqe + 1, NULL,
                             backend_dev->channel, 0);
    if (!cq->ibcq) {
        rdma_error_report("ibv_create_cq fail, errno=%d", errno);
        return -EIO;
    }

    rc = ibv_req_notify_cq(cq->ibcq, 0);
    if (rc) {
        rdma_warn_report("ibv_req_notify_cq fail, rc=%d, errno=%d", rc, errno);
    }

    cq->backend_dev = backend_dev;

    return 0;
}

void rdma_backend_destroy_cq(RdmaBackendCQ *cq)
{
    if (cq->ibcq) {
        ibv_destroy_cq(cq->ibcq);
    }
}

int rdma_backend_create_qp(RdmaBackendQP *qp, uint8_t qp_type,
                           RdmaBackendPD *pd, RdmaBackendCQ *scq,
                           RdmaBackendCQ *rcq, uint32_t max_send_wr,
                           uint32_t max_recv_wr, uint32_t max_send_sge,
                           uint32_t max_recv_sge)
{
    struct ibv_qp_init_attr attr = {};

    qp->ibqp = 0;

    switch (qp_type) {
    case IBV_QPT_GSI:
        return 0;

    case IBV_QPT_RC:
        /* fall through */
    case IBV_QPT_UD:
        /* do nothing */
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    attr.qp_type = qp_type;
    attr.send_cq = scq->ibcq;
    attr.recv_cq = rcq->ibcq;
    attr.cap.max_send_wr = max_send_wr;
    attr.cap.max_recv_wr = max_recv_wr;
    attr.cap.max_send_sge = max_send_sge;
    attr.cap.max_recv_sge = max_recv_sge;

    qp->ibqp = ibv_create_qp(pd->ibpd, &attr);
    if (!qp->ibqp) {
        rdma_error_report("ibv_create_qp fail, errno=%d", errno);
        return -EIO;
    }

    rdma_protected_gslist_init(&qp->cqe_ctx_list);

    qp->ibpd = pd->ibpd;

    /* TODO: Query QP to get max_inline_data and save it to be used in send */

    return 0;
}

int rdma_backend_qp_state_init(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                               uint8_t qp_type, uint32_t qkey)
{
    struct ibv_qp_attr attr = {};
    int rc, attr_mask;

    attr_mask = IBV_QP_STATE | IBV_QP_PKEY_INDEX | IBV_QP_PORT;
    attr.qp_state = IBV_QPS_INIT;
    attr.pkey_index = 0;
    attr.port_num = backend_dev->port_num;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr_mask |= IBV_QP_ACCESS_FLAGS;
        trace_rdma_backend_rc_qp_state_init(qp->ibqp->qp_num);
        break;

    case IBV_QPT_UD:
        attr.qkey = qkey;
        attr_mask |= IBV_QP_QKEY;
        trace_rdma_backend_ud_qp_state_init(qp->ibqp->qp_num, qkey);
        break;

    default:
        rdma_error_report("Unsupported QP type %d", qp_type);
        return -EIO;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_qp_state_rtr(RdmaBackendDev *backend_dev, RdmaBackendQP *qp,
                              uint8_t qp_type, uint8_t sgid_idx,
                              union ibv_gid *dgid, uint32_t dqpn,
                              uint32_t rq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {};
    union ibv_gid ibv_gid = {
        .global.interface_id = dgid->global.interface_id,
        .global.subnet_prefix = dgid->global.subnet_prefix
    };
    int rc, attr_mask;

    attr.qp_state = IBV_QPS_RTR;
    attr_mask = IBV_QP_STATE;

    qp->sgid_idx = sgid_idx;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr.path_mtu = IBV_MTU_1024;
        attr.dest_qp_num = dqpn;
        attr.max_dest_rd_atomic = 1;
        attr.min_rnr_timer = 12;
        attr.ah_attr.port_num = backend_dev->port_num;
        attr.ah_attr.is_global = 1;
        attr.ah_attr.grh.hop_limit = 1;
        attr.ah_attr.grh.dgid = ibv_gid;
        attr.ah_attr.grh.sgid_index = qp->sgid_idx;
        attr.rq_psn = rq_psn;

        attr_mask |= IBV_QP_AV | IBV_QP_PATH_MTU | IBV_QP_DEST_QPN |
                     IBV_QP_RQ_PSN | IBV_QP_MAX_DEST_RD_ATOMIC |
                     IBV_QP_MIN_RNR_TIMER;

        trace_rdma_backend_rc_qp_state_rtr(qp->ibqp->qp_num,
                                           be64_to_cpu(ibv_gid.global.
                                                       subnet_prefix),
                                           be64_to_cpu(ibv_gid.global.
                                                       interface_id),
                                           qp->sgid_idx, dqpn, rq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rtr(qp->ibqp->qp_num, use_qkey ? qkey :
                                           0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_qp_state_rts(RdmaBackendQP *qp, uint8_t qp_type,
                              uint32_t sq_psn, uint32_t qkey, bool use_qkey)
{
    struct ibv_qp_attr attr = {};
    int rc, attr_mask;

    attr.qp_state = IBV_QPS_RTS;
    attr.sq_psn = sq_psn;
    attr_mask = IBV_QP_STATE | IBV_QP_SQ_PSN;

    switch (qp_type) {
    case IBV_QPT_RC:
        attr.timeout = 14;
        attr.retry_cnt = 7;
        attr.rnr_retry = 7;
        attr.max_rd_atomic = 1;

        attr_mask |= IBV_QP_TIMEOUT | IBV_QP_RETRY_CNT | IBV_QP_RNR_RETRY |
                     IBV_QP_MAX_QP_RD_ATOMIC;
        trace_rdma_backend_rc_qp_state_rts(qp->ibqp->qp_num, sq_psn);
        break;

    case IBV_QPT_UD:
        if (use_qkey) {
            attr.qkey = qkey;
            attr_mask |= IBV_QP_QKEY;
        }
        trace_rdma_backend_ud_qp_state_rts(qp->ibqp->qp_num, sq_psn,
                                           use_qkey ? qkey : 0);
        break;
    }

    rc = ibv_modify_qp(qp->ibqp, &attr, attr_mask);
    if (rc) {
        rdma_error_report("ibv_modify_qp fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    return 0;
}

int rdma_backend_query_qp(RdmaBackendQP *qp, struct ibv_qp_attr *attr,
                          int attr_mask, struct ibv_qp_init_attr *init_attr)
{
    if (!qp->ibqp) {
        attr->qp_state = IBV_QPS_RTS;
        return 0;
    }

    return ibv_query_qp(qp->ibqp, attr, attr_mask, init_attr);
}

void rdma_backend_destroy_qp(RdmaBackendQP *qp, RdmaDeviceResources *dev_res)
{
    if (qp->ibqp) {
        ibv_destroy_qp(qp->ibqp);
    }
    g_slist_foreach(qp->cqe_ctx_list.list, free_cqe_ctx, dev_res);
    rdma_protected_gslist_destroy(&qp->cqe_ctx_list);
}

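/*
 * Device attributes reported to the guest must not exceed what the host
 * device supports; CHK_ATTR clamps each requested attribute to the host
 * capability and warns when it had to be lowered.
 */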
#define CHK_ATTR(req, dev, member, fmt) ({ \
    trace_rdma_check_dev_attr(#member, dev.member, req->member); \
    if (req->member > dev.member) { \
        rdma_warn_report("%s = "fmt" is higher than host device capability "fmt, \
                         #member, req->member, dev.member); \
        req->member = dev.member; \
    } \
})

static int init_device_caps(RdmaBackendDev *backend_dev,
                            struct ibv_device_attr *dev_attr)
{
    struct ibv_device_attr bk_dev_attr;
    int rc;

    rc = ibv_query_device(backend_dev->context, &bk_dev_attr);
    if (rc) {
        rdma_error_report("ibv_query_device fail, rc=%d, errno=%d", rc, errno);
        return -EIO;
    }

    dev_attr->max_sge = MAX_SGE;

    CHK_ATTR(dev_attr, bk_dev_attr, max_mr_size, "%" PRId64);
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_sge, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_cq, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_mr, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_pd, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_qp_init_rd_atom, "%d");
    CHK_ATTR(dev_attr, bk_dev_attr, max_ah, "%d");

    return 0;
}

static inline void build_mad_hdr(struct ibv_grh *grh, union ibv_gid *sgid,
                                 union ibv_gid *my_gid, int paylen)
{
    grh->paylen = htons(paylen);
    grh->sgid = *sgid;
    grh->dgid = *my_gid;
}

static void process_incoming_mad_req(RdmaBackendDev *backend_dev,
                                     RdmaCmMuxMsg *msg)
{
    unsigned long cqe_ctx_id;
    BackendCtx *bctx;
    char *mad;

    trace_mad_message("recv", msg->umad.mad, msg->umad_len);

    cqe_ctx_id = rdma_protected_qlist_pop_int64(&backend_dev->recv_mads_list);
    if (cqe_ctx_id == -ENOENT) {
        rdma_warn_report("No more free MADs buffers, waiting for a while");
        sleep(THR_POLL_TO);
        return;
    }

    bctx = rdma_rm_get_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
    if (unlikely(!bctx)) {
        rdma_error_report("No matching ctx for req %ld", cqe_ctx_id);
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        return;
    }

    mad = rdma_pci_dma_map(backend_dev->dev, bctx->sge.addr,
                           bctx->sge.length);
    if (!mad || bctx->sge.length < msg->umad_len + MAD_HDR_SIZE) {
        backend_dev->rdma_dev_res->stats.mad_rx_err++;
        complete_work(IBV_WC_GENERAL_ERR, VENDOR_ERR_INV_MAD_BUFF,
                      bctx->up_ctx);
    } else {
        struct ibv_wc wc = {};
        memset(mad, 0, bctx->sge.length);
        build_mad_hdr((struct ibv_grh *)mad,
                      (union ibv_gid *)&msg->umad.hdr.addr.gid, &msg->hdr.sgid,
                      msg->umad_len);
        memcpy(&mad[MAD_HDR_SIZE], msg->umad.mad, msg->umad_len);
        rdma_pci_dma_unmap(backend_dev->dev, mad, bctx->sge.length);

        wc.byte_len = msg->umad_len;
        wc.status = IBV_WC_SUCCESS;
        wc.wc_flags = IBV_WC_GRH;
        backend_dev->rdma_dev_res->stats.mad_rx++;
        comp_handler(bctx->up_ctx, &wc);
    }

    g_free(bctx);
    rdma_rm_dealloc_cqe_ctx(backend_dev->rdma_dev_res, cqe_ctx_id);
}

static inline int rdmacm_mux_can_receive(void *opaque)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;

    return rdmacm_mux_can_process_async(backend_dev);
}

static void rdmacm_mux_read(void *opaque, const uint8_t *buf, int size)
{
    RdmaBackendDev *backend_dev = (RdmaBackendDev *)opaque;
    RdmaCmMuxMsg *msg = (RdmaCmMuxMsg *)buf;

    trace_rdmacm_mux("read", msg->hdr.msg_type, msg->hdr.op_code);

    if (msg->hdr.msg_type != RDMACM_MUX_MSG_TYPE_REQ &&
        msg->hdr.op_code != RDMACM_MUX_OP_CODE_MAD) {
        rdma_error_report("Error: Not a MAD request, skipping");
        return;
    }
    process_incoming_mad_req(backend_dev, msg);
}

static int mad_init(RdmaBackendDev *backend_dev, CharBackend *mad_chr_be)
{
    int ret;

    backend_dev->rdmacm_mux.chr_be = mad_chr_be;

    ret = qemu_chr_fe_backend_connected(backend_dev->rdmacm_mux.chr_be);
    if (!ret) {
        rdma_error_report("Missing chardev for MAD multiplexer");
        return -EIO;
    }

    rdma_protected_qlist_init(&backend_dev->recv_mads_list);

    enable_rdmacm_mux_async(backend_dev);

    qemu_chr_fe_set_handlers(backend_dev->rdmacm_mux.chr_be,
                             rdmacm_mux_can_receive, rdmacm_mux_read, NULL,
                             NULL, backend_dev, NULL, true);

    return 0;
}

static void mad_stop(RdmaBackendDev *backend_dev)
{
    clean_recv_mads(backend_dev);
}

static void mad_fini(RdmaBackendDev *backend_dev)
{
    disable_rdmacm_mux_async(backend_dev);
    qemu_chr_fe_disconnect(backend_dev->rdmacm_mux.chr_be);
    rdma_protected_qlist_destroy(&backend_dev->recv_mads_list);
}

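/*
 * Resolve a GID to its index in the host device's GID table by scanning the
 * table entry by entry until a match (or the end of the table) is found.
 */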
int rdma_backend_get_gid_index(RdmaBackendDev *backend_dev,
                               union ibv_gid *gid)
{
    union ibv_gid sgid;
    int ret;
    int i = 0;

    do {
        ret = ibv_query_gid(backend_dev->context, backend_dev->port_num, i,
                            &sgid);
        i++;
    } while (!ret && (memcmp(&sgid, gid, sizeof(*gid))));

    trace_rdma_backend_get_gid_index(be64_to_cpu(gid->global.subnet_prefix),
                                     be64_to_cpu(gid->global.interface_id),
                                     i - 1);

    return ret ? ret : i - 1;
}

int rdma_backend_add_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("add", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_REG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to register GID to rdma_umadmux (%d)", ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, true,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return ret;
}

int rdma_backend_del_gid(RdmaBackendDev *backend_dev, const char *ifname,
                         union ibv_gid *gid)
{
    RdmaCmMuxMsg msg = {};
    int ret;

    trace_rdma_backend_gid_change("del", be64_to_cpu(gid->global.subnet_prefix),
                                  be64_to_cpu(gid->global.interface_id));

    msg.hdr.op_code = RDMACM_MUX_OP_CODE_UNREG;
    memcpy(msg.hdr.sgid.raw, gid->raw, sizeof(msg.hdr.sgid));

    ret = rdmacm_mux_send(backend_dev, &msg);
    if (ret) {
        rdma_error_report("Failed to unregister GID from rdma_umadmux (%d)",
                          ret);
        return -EIO;
    }

    qapi_event_send_rdma_gid_status_changed(ifname, false,
                                            gid->global.subnet_prefix,
                                            gid->global.interface_id);

    return 0;
}

int rdma_backend_init(RdmaBackendDev *backend_dev, PCIDevice *pdev,
                      RdmaDeviceResources *rdma_dev_res,
                      const char *backend_device_name, uint8_t port_num,
                      struct ibv_device_attr *dev_attr, CharBackend *mad_chr_be)
{
    int i;
    int ret = 0;
    int num_ibv_devices;
    struct ibv_device **dev_list;

    memset(backend_dev, 0, sizeof(*backend_dev));

    backend_dev->dev = pdev;
    backend_dev->port_num = port_num;
    backend_dev->rdma_dev_res = rdma_dev_res;

    rdma_backend_register_comp_handler(dummy_comp_handler);

    dev_list = ibv_get_device_list(&num_ibv_devices);
    if (!dev_list) {
        rdma_error_report("Failed to get IB devices list");
        return -EIO;
    }

    if (num_ibv_devices == 0) {
        rdma_error_report("No IB devices were found");
        ret = -ENXIO;
        goto out_free_dev_list;
    }

    if (backend_device_name) {
        for (i = 0; dev_list[i]; ++i) {
            if (!strcmp(ibv_get_device_name(dev_list[i]),
                        backend_device_name)) {
                break;
            }
        }

        backend_dev->ib_dev = dev_list[i];
        if (!backend_dev->ib_dev) {
            rdma_error_report("Failed to find IB device %s",
                              backend_device_name);
            ret = -EIO;
            goto out_free_dev_list;
        }
    } else {
        backend_dev->ib_dev = *dev_list;
    }

    rdma_info_report("uverb device %s", backend_dev->ib_dev->dev_name);

    backend_dev->context = ibv_open_device(backend_dev->ib_dev);
    if (!backend_dev->context) {
        rdma_error_report("Failed to open IB device %s",
                          ibv_get_device_name(backend_dev->ib_dev));
        ret = -EIO;
        goto out;
    }

    backend_dev->channel = ibv_create_comp_channel(backend_dev->context);
    if (!backend_dev->channel) {
        rdma_error_report("Failed to create IB communication channel");
        ret = -EIO;
        goto out_close_device;
    }

    ret = init_device_caps(backend_dev, dev_attr);
    if (ret) {
        rdma_error_report("Failed to initialize device capabilities");
        ret = -EIO;
        goto out_destroy_comm_channel;
    }

    ret = mad_init(backend_dev, mad_chr_be);
    if (ret) {
        rdma_error_report("Failed to initialize mad");
        ret = -EIO;
        goto out_destroy_comm_channel;
    }

    backend_dev->comp_thread.run = false;
    backend_dev->comp_thread.is_running = false;

    ah_cache_init();

    goto out_free_dev_list;

out_destroy_comm_channel:
    ibv_destroy_comp_channel(backend_dev->channel);

out_close_device:
    ibv_close_device(backend_dev->context);

out_free_dev_list:
    ibv_free_device_list(dev_list);

out:
    return ret;
}

void rdma_backend_start(RdmaBackendDev *backend_dev)
{
    start_comp_thread(backend_dev);
}

void rdma_backend_stop(RdmaBackendDev *backend_dev)
{
    mad_stop(backend_dev);
    stop_backend_thread(&backend_dev->comp_thread);
}

void rdma_backend_fini(RdmaBackendDev *backend_dev)
{
    mad_fini(backend_dev);
    g_hash_table_destroy(ah_hash);
    ibv_destroy_comp_channel(backend_dev->channel);
    ibv_close_device(backend_dev->context);
}