/*
 * QEMU paravirtual RDMA - Resource Manager Implementation
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"

#include "rdma_utils.h"
#include "rdma_backend.h"
#include "rdma_rm.h"
#define MAX_RM_TBL_NAME 16

/* Page directory and page tables */
#define PG_DIR_SZ (TARGET_PAGE_SIZE / sizeof(__u64))
#define PG_TBL_SZ (TARGET_PAGE_SIZE / sizeof(__u64))
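/*
 * Generic resource table.  Each table is a flat array of fixed-size
 * entries plus an allocation bitmap: a handle is simply the index of an
 * entry, so lookup and deallocation are O(1) and allocation is a
 * first-free-bit scan of the bitmap.  The mutex guards only the bitmap;
 * the entries themselves are not locked here.
 */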
static inline void res_tbl_init(const char *name, RdmaRmResTbl *tbl,
                                uint32_t tbl_sz, uint32_t res_sz)
{
    tbl->tbl = g_malloc(tbl_sz * res_sz);

    strncpy(tbl->name, name, MAX_RM_TBL_NAME);
    tbl->name[MAX_RM_TBL_NAME - 1] = 0;

    tbl->bitmap = bitmap_new(tbl_sz);
    tbl->tbl_sz = tbl_sz;
    tbl->res_sz = res_sz;
    qemu_mutex_init(&tbl->lock);
}
static inline void res_tbl_free(RdmaRmResTbl *tbl)
{
    qemu_mutex_destroy(&tbl->lock);
    g_free(tbl->tbl);
    bitmap_zero_extend(tbl->bitmap, tbl->tbl_sz, 0);
}
static inline void *res_tbl_get(RdmaRmResTbl *tbl, uint32_t handle)
{
    pr_dbg("%s, handle=%d\n", tbl->name, handle);

    if ((handle < tbl->tbl_sz) && (test_bit(handle, tbl->bitmap))) {
        return tbl->tbl + handle * tbl->res_sz;
    } else {
        pr_dbg("Invalid handle %d\n", handle);
        return NULL;
    }
}
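/*
 * Allocation takes the first clear bit as the new handle and zero-fills
 * the corresponding entry, so a freshly allocated resource always
 * starts from a known-clean state.
 */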
static inline void *res_tbl_alloc(RdmaRmResTbl *tbl, uint32_t *handle)
{
    qemu_mutex_lock(&tbl->lock);

    *handle = find_first_zero_bit(tbl->bitmap, tbl->tbl_sz);
    /* find_first_zero_bit() returns tbl_sz when the bitmap is full */
    if (*handle >= tbl->tbl_sz) {
        pr_dbg("Failed to alloc, bitmap is full\n");
        qemu_mutex_unlock(&tbl->lock);
        return NULL;
    }

    set_bit(*handle, tbl->bitmap);

    qemu_mutex_unlock(&tbl->lock);

    memset(tbl->tbl + *handle * tbl->res_sz, 0, tbl->res_sz);

    pr_dbg("%s, handle=%d\n", tbl->name, *handle);

    return tbl->tbl + *handle * tbl->res_sz;
}
static inline void res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
{
    pr_dbg("%s, handle=%d\n", tbl->name, handle);

    qemu_mutex_lock(&tbl->lock);

    if (handle < tbl->tbl_sz) {
        clear_bit(handle, tbl->bitmap);
    }

    qemu_mutex_unlock(&tbl->lock);
}
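/*
 * Every rdma_rm_alloc_* function below follows the same two-step
 * pattern: reserve a table slot (which yields the guest-visible
 * handle), then create the matching object in the host backend,
 * releasing the slot again if the backend call fails.  A minimal caller
 * sketch (hypothetical, for illustration only):
 *
 *     uint32_t pd_handle;
 *     if (!rdma_rm_alloc_pd(dev_res, backend_dev, &pd_handle, ctx_handle)) {
 *         RdmaRmPD *pd = rdma_rm_get_pd(dev_res, pd_handle);
 *         ...
 *         rdma_rm_dealloc_pd(dev_res, pd_handle);
 *     }
 */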
int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                     uint32_t *pd_handle, uint32_t ctx_handle)
{
    RdmaRmPD *pd;
    int ret = -ENOMEM;

    pd = res_tbl_alloc(&dev_res->pd_tbl, pd_handle);
    if (!pd) {
        goto out;
    }

    ret = rdma_backend_create_pd(backend_dev, &pd->backend_pd);
    if (ret) {
        ret = -EIO;
        goto out_tbl_dealloc;
    }

    pd->ctx_handle = ctx_handle;

    return 0;

out_tbl_dealloc:
    res_tbl_dealloc(&dev_res->pd_tbl, *pd_handle);

out:
    return ret;
}
RdmaRmPD *rdma_rm_get_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle)
{
    return res_tbl_get(&dev_res->pd_tbl, pd_handle);
}
void rdma_rm_dealloc_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle)
{
    RdmaRmPD *pd = rdma_rm_get_pd(dev_res, pd_handle);

    if (pd) {
        rdma_backend_destroy_pd(&pd->backend_pd);
        res_tbl_dealloc(&dev_res->pd_tbl, pd_handle);
    }
}
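/*
 * A memory region records both the guest-physical start address and the
 * host virtual address it is mapped at, so guest addresses carried in
 * work requests can later be translated to host pointers for the
 * backend.
 */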
int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                     uint64_t guest_start, size_t guest_length, void *host_virt,
                     int access_flags, uint32_t *mr_handle, uint32_t *lkey,
                     uint32_t *rkey)
{
    RdmaRmMR *mr;
    int ret = 0;
    RdmaRmPD *pd;
    void *addr;
    size_t length;

    pd = rdma_rm_get_pd(dev_res, pd_handle);
    if (!pd) {
        pr_dbg("Invalid PD\n");
        return -EINVAL;
    }

    mr = res_tbl_alloc(&dev_res->mr_tbl, mr_handle);
    if (!mr) {
        pr_dbg("Failed to allocate obj in table\n");
        return -ENOMEM;
    }

    if (!host_virt) {
        /* TODO: This is my guess but not so sure that this needs to be
         * done */
        length = TARGET_PAGE_SIZE;
        addr = g_malloc(length);
    } else {
        mr->user_mr.host_virt = host_virt;
        pr_dbg("host_virt=0x%p\n", mr->user_mr.host_virt);
        mr->user_mr.length = guest_length;
        pr_dbg("length=%zu\n", guest_length);
        mr->user_mr.guest_start = guest_start;
        pr_dbg("guest_start=0x%" PRIx64 "\n", mr->user_mr.guest_start);

        length = mr->user_mr.length;
        addr = mr->user_mr.host_virt;
    }

    ret = rdma_backend_create_mr(&mr->backend_mr, &pd->backend_pd, addr, length,
                                 access_flags);
    if (ret) {
        pr_dbg("Fail in rdma_backend_create_mr, err=%d\n", ret);
        ret = -EIO;
        goto out_dealloc_mr;
    }

    *lkey = mr->lkey = rdma_backend_mr_lkey(&mr->backend_mr);
    *rkey = mr->rkey = rdma_backend_mr_rkey(&mr->backend_mr);

    /* We keep mr_handle in lkey so send and recv can get the mr ptr */

    mr->pd_handle = pd_handle;

    return 0;

out_dealloc_mr:
    res_tbl_dealloc(&dev_res->mr_tbl, *mr_handle);

    return ret;
}
RdmaRmMR *rdma_rm_get_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle)
{
    return res_tbl_get(&dev_res->mr_tbl, mr_handle);
}
void rdma_rm_dealloc_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle)
{
    RdmaRmMR *mr = rdma_rm_get_mr(dev_res, mr_handle);

    if (mr) {
        rdma_backend_destroy_mr(&mr->backend_mr);
        munmap(mr->user_mr.host_virt, mr->user_mr.length);
        res_tbl_dealloc(&dev_res->mr_tbl, mr_handle);
    }
}
int rdma_rm_alloc_uc(RdmaDeviceResources *dev_res, uint32_t pfn,
                     uint32_t *uc_handle)
{
    RdmaRmUC *uc;

    /* TODO: Need to make sure pfn is between bar start address and
     * bar start + RDMA_BAR2_UAR_SIZE
    if (pfn > RDMA_BAR2_UAR_SIZE) {
        pr_err("pfn out of range (%d > %d)\n", pfn, RDMA_BAR2_UAR_SIZE);
        return -ENOMEM;
    }
    */

    uc = res_tbl_alloc(&dev_res->uc_tbl, uc_handle);
    if (!uc) {
        return -ENOMEM;
    }

    return 0;
}
RdmaRmUC *rdma_rm_get_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle)
{
    return res_tbl_get(&dev_res->uc_tbl, uc_handle);
}
void rdma_rm_dealloc_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle)
{
    RdmaRmUC *uc = rdma_rm_get_uc(dev_res, uc_handle);

    if (uc) {
        res_tbl_dealloc(&dev_res->uc_tbl, uc_handle);
    }
}
RdmaRmCQ *rdma_rm_get_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle)
{
    return res_tbl_get(&dev_res->cq_tbl, cq_handle);
}
int rdma_rm_alloc_cq(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                     uint32_t cqe, uint32_t *cq_handle, void *opaque)
{
    int rc;
    RdmaRmCQ *cq;

    cq = res_tbl_alloc(&dev_res->cq_tbl, cq_handle);
    if (!cq) {
        return -ENOMEM;
    }

    cq->opaque = opaque;
    cq->notify = false;

    rc = rdma_backend_create_cq(backend_dev, &cq->backend_cq, cqe);
    if (rc) {
        rc = -EIO;
        goto out_dealloc_cq;
    }

    return 0;

out_dealloc_cq:
    rdma_rm_dealloc_cq(dev_res, *cq_handle);

    return rc;
}
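/*
 * The notify flag mirrors the guest's "request notification" verb; the
 * completion path elsewhere in the device is expected to interrupt the
 * guest for this CQ only while the flag is set.
 */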
void rdma_rm_req_notify_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle,
                           bool notify)
{
    RdmaRmCQ *cq;

    pr_dbg("cq_handle=%d, notify=0x%x\n", cq_handle, notify);

    cq = rdma_rm_get_cq(dev_res, cq_handle);
    if (!cq) {
        return;
    }

    cq->notify = notify;
    pr_dbg("notify=%d\n", cq->notify);
}
void rdma_rm_dealloc_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle)
{
    RdmaRmCQ *cq;

    cq = rdma_rm_get_cq(dev_res, cq_handle);
    if (!cq) {
        return;
    }

    rdma_backend_destroy_cq(&cq->backend_cq);

    res_tbl_dealloc(&dev_res->cq_tbl, cq_handle);
}
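/*
 * QPs cannot be looked up by table index alone: the guest refers to a
 * QP by the backend-assigned queue pair number, not by its slot in
 * qp_tbl.  qp_hash therefore maps the qpn (wrapped in a GBytes key)
 * back to the RdmaRmQP entry.
 */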
RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn)
{
    GBytes *key = g_bytes_new(&qpn, sizeof(qpn));

    RdmaRmQP *qp = g_hash_table_lookup(dev_res->qp_hash, key);

    g_bytes_unref(key);

    return qp;
}
int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                     uint8_t qp_type, uint32_t max_send_wr,
                     uint32_t max_send_sge, uint32_t send_cq_handle,
                     uint32_t max_recv_wr, uint32_t max_recv_sge,
                     uint32_t recv_cq_handle, void *opaque, uint32_t *qpn)
{
    int rc;
    RdmaRmQP *qp;
    RdmaRmCQ *scq, *rcq;
    RdmaRmPD *pd;
    uint32_t rm_qpn;

    pr_dbg("qp_type=%d\n", qp_type);

    pd = rdma_rm_get_pd(dev_res, pd_handle);
    if (!pd) {
        pr_err("Invalid pd handle (%d)\n", pd_handle);
        return -EINVAL;
    }

    scq = rdma_rm_get_cq(dev_res, send_cq_handle);
    rcq = rdma_rm_get_cq(dev_res, recv_cq_handle);

    if (!scq || !rcq) {
        pr_err("Invalid send_cqn or recv_cqn (%d, %d)\n",
               send_cq_handle, recv_cq_handle);
        return -EINVAL;
    }

    qp = res_tbl_alloc(&dev_res->qp_tbl, &rm_qpn);
    if (!qp) {
        return -ENOMEM;
    }
    pr_dbg("rm_qpn=%d\n", rm_qpn);

    qp->qpn = rm_qpn;
    qp->qp_state = IBV_QPS_RESET;
    qp->qp_type = qp_type;
    qp->send_cq_handle = send_cq_handle;
    qp->recv_cq_handle = recv_cq_handle;
    qp->opaque = opaque;

    rc = rdma_backend_create_qp(&qp->backend_qp, qp_type, &pd->backend_pd,
                                &scq->backend_cq, &rcq->backend_cq, max_send_wr,
                                max_recv_wr, max_send_sge, max_recv_sge);
    if (rc) {
        rc = -EIO;
        goto out_dealloc_qp;
    }

    *qpn = rdma_backend_qpn(&qp->backend_qp);
    pr_dbg("rm_qpn=%d, backend_qpn=0x%x\n", rm_qpn, *qpn);
    g_hash_table_insert(dev_res->qp_hash, g_bytes_new(qpn, sizeof(*qpn)), qp);

    return 0;

out_dealloc_qp:
    res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn);

    return rc;
}
int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                      uint32_t qp_handle, uint32_t attr_mask,
                      union ibv_gid *dgid, uint32_t dqpn,
                      enum ibv_qp_state qp_state, uint32_t qkey,
                      uint32_t rq_psn, uint32_t sq_psn)
{
    RdmaRmQP *qp;
    int ret;

    pr_dbg("qpn=%d\n", qp_handle);

    qp = rdma_rm_get_qp(dev_res, qp_handle);
    if (!qp) {
        return -EINVAL;
    }

    pr_dbg("qp_type=%d\n", qp->qp_type);
    pr_dbg("attr_mask=0x%x\n", attr_mask);

    if (qp->qp_type == IBV_QPT_SMI) {
        pr_dbg("QP0 unsupported\n");
        return -EPERM;
    } else if (qp->qp_type == IBV_QPT_GSI) {
        pr_dbg("QP1\n");
        return 0;
    }

    if (attr_mask & IBV_QP_STATE) {
        qp->qp_state = qp_state;
        pr_dbg("qp_state=%d\n", qp->qp_state);

        if (qp->qp_state == IBV_QPS_INIT) {
            ret = rdma_backend_qp_state_init(backend_dev, &qp->backend_qp,
                                             qp->qp_type, qkey);
            if (ret) {
                return -EIO;
            }
        }

        if (qp->qp_state == IBV_QPS_RTR) {
            ret = rdma_backend_qp_state_rtr(backend_dev, &qp->backend_qp,
                                            qp->qp_type, dgid, dqpn, rq_psn,
                                            qkey, attr_mask & IBV_QP_QKEY);
            if (ret) {
                return -EIO;
            }
        }

        if (qp->qp_state == IBV_QPS_RTS) {
            ret = rdma_backend_qp_state_rts(&qp->backend_qp, qp->qp_type,
                                            sq_psn, qkey,
                                            attr_mask & IBV_QP_QKEY);
            if (ret) {
                return -EIO;
            }
        }
    }

    return 0;
}
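/*
 * The transitions above follow the standard IB verbs QP state machine,
 * RESET -> INIT -> RTR (ready to receive) -> RTS (ready to send).  QP0
 * (IBV_QPT_SMI) is rejected outright and QP1 (IBV_QPT_GSI) is accepted
 * without touching the backend, so only regular guest QPs are driven
 * through the backend state helpers.
 */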
int rdma_rm_query_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                     uint32_t qp_handle, struct ibv_qp_attr *attr,
                     int attr_mask, struct ibv_qp_init_attr *init_attr)
{
    RdmaRmQP *qp;

    pr_dbg("qpn=%d\n", qp_handle);

    qp = rdma_rm_get_qp(dev_res, qp_handle);
    if (!qp) {
        return -EINVAL;
    }

    pr_dbg("qp_type=%d\n", qp->qp_type);

    return rdma_backend_query_qp(&qp->backend_qp, attr, attr_mask, init_attr);
}
void rdma_rm_dealloc_qp(RdmaDeviceResources *dev_res, uint32_t qp_handle)
{
    RdmaRmQP *qp;
    GBytes *key;

    key = g_bytes_new(&qp_handle, sizeof(qp_handle));
    qp = g_hash_table_lookup(dev_res->qp_hash, key);
    g_hash_table_remove(dev_res->qp_hash, key);
    g_bytes_unref(key);

    if (!qp) {
        return;
    }

    rdma_backend_destroy_qp(&qp->backend_qp);

    res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn);
}
void *rdma_rm_get_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id)
{
    void **cqe_ctx;

    cqe_ctx = res_tbl_get(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
    if (!cqe_ctx) {
        return NULL;
    }

    pr_dbg("ctx=%p\n", *cqe_ctx);

    return *cqe_ctx;
}
int rdma_rm_alloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t *cqe_ctx_id,
                          void *ctx)
{
    void **cqe_ctx;

    cqe_ctx = res_tbl_alloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
    if (!cqe_ctx) {
        return -ENOMEM;
    }

    pr_dbg("ctx=%p\n", ctx);
    *cqe_ctx = ctx;

    return 0;
}
void rdma_rm_dealloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id)
{
    res_tbl_dealloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
}
static void destroy_qp_hash_key(gpointer data)
{
    g_bytes_unref(data);
}
int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr,
                 Error **errp)
{
    dev_res->qp_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal,
                                             destroy_qp_hash_key, NULL);
    if (!dev_res->qp_hash) {
        return -ENOMEM;
    }

    res_tbl_init("PD", &dev_res->pd_tbl, dev_attr->max_pd, sizeof(RdmaRmPD));
    res_tbl_init("CQ", &dev_res->cq_tbl, dev_attr->max_cq, sizeof(RdmaRmCQ));
    res_tbl_init("MR", &dev_res->mr_tbl, dev_attr->max_mr, sizeof(RdmaRmMR));
    res_tbl_init("QP", &dev_res->qp_tbl, dev_attr->max_qp, sizeof(RdmaRmQP));
    res_tbl_init("CQE_CTX", &dev_res->cqe_ctx_tbl, dev_attr->max_qp *
                 dev_attr->max_qp_wr, sizeof(void *));
    res_tbl_init("UC", &dev_res->uc_tbl, MAX_UCS, sizeof(RdmaRmUC));

    return 0;
}
void rdma_rm_fini(RdmaDeviceResources *dev_res)
{
    res_tbl_free(&dev_res->uc_tbl);
    res_tbl_free(&dev_res->cqe_ctx_tbl);
    res_tbl_free(&dev_res->qp_tbl);
    res_tbl_free(&dev_res->cq_tbl);
    res_tbl_free(&dev_res->mr_tbl);
    res_tbl_free(&dev_res->pd_tbl);
    g_hash_table_destroy(dev_res->qp_hash);
}
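/*
 * Lifecycle sketch (hypothetical caller, for illustration only): tables
 * are freed in reverse dependency order, with the qpn hash dropped
 * last.
 *
 *     RdmaDeviceResources res;
 *     if (!rdma_rm_init(&res, &dev_attr, errp)) {
 *         ... allocate and use resources ...
 *         rdma_rm_fini(&res);
 *     }
 */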