/*
 * QEMU paravirtual RDMA - Resource Manager Implementation
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
#include "monitor/monitor.h"

#include "trace.h"
#include "rdma_utils.h"
#include "rdma_backend.h"
#include "rdma_rm.h"

/* Page directory and page tables */
#define PG_DIR_SZ (TARGET_PAGE_SIZE / sizeof(__u64))
#define PG_TBL_SZ (TARGET_PAGE_SIZE / sizeof(__u64))

void rdma_dump_device_counters(Monitor *mon, RdmaDeviceResources *dev_res)
{
    monitor_printf(mon, "\ttx               : %" PRId64 "\n",
                   dev_res->stats.tx);
    monitor_printf(mon, "\ttx_len           : %" PRId64 "\n",
                   dev_res->stats.tx_len);
    monitor_printf(mon, "\ttx_err           : %" PRId64 "\n",
                   dev_res->stats.tx_err);
    monitor_printf(mon, "\trx_bufs          : %" PRId64 "\n",
                   dev_res->stats.rx_bufs);
    monitor_printf(mon, "\trx_srq           : %" PRId64 "\n",
                   dev_res->stats.rx_srq);
    monitor_printf(mon, "\trx_bufs_len      : %" PRId64 "\n",
                   dev_res->stats.rx_bufs_len);
    monitor_printf(mon, "\trx_bufs_err      : %" PRId64 "\n",
                   dev_res->stats.rx_bufs_err);
    monitor_printf(mon, "\tcomps            : %" PRId64 "\n",
                   dev_res->stats.completions);
    monitor_printf(mon, "\tmissing_comps    : %" PRId32 "\n",
                   dev_res->stats.missing_cqe);
    monitor_printf(mon, "\tpoll_cq (bk)     : %" PRId64 "\n",
                   dev_res->stats.poll_cq_from_bk);
    monitor_printf(mon, "\tpoll_cq_ppoll_to : %" PRId64 "\n",
                   dev_res->stats.poll_cq_ppoll_to);
    monitor_printf(mon, "\tpoll_cq (fe)     : %" PRId64 "\n",
                   dev_res->stats.poll_cq_from_guest);
    monitor_printf(mon, "\tpoll_cq_empty    : %" PRId64 "\n",
                   dev_res->stats.poll_cq_from_guest_empty);
    monitor_printf(mon, "\tmad_tx           : %" PRId64 "\n",
                   dev_res->stats.mad_tx);
    monitor_printf(mon, "\tmad_tx_err       : %" PRId64 "\n",
                   dev_res->stats.mad_tx_err);
    monitor_printf(mon, "\tmad_rx           : %" PRId64 "\n",
                   dev_res->stats.mad_rx);
    monitor_printf(mon, "\tmad_rx_err       : %" PRId64 "\n",
                   dev_res->stats.mad_rx_err);
    monitor_printf(mon, "\tmad_rx_bufs      : %" PRId64 "\n",
                   dev_res->stats.mad_rx_bufs);
    monitor_printf(mon, "\tmad_rx_bufs_err  : %" PRId64 "\n",
                   dev_res->stats.mad_rx_bufs_err);
    monitor_printf(mon, "\tPDs              : %" PRId32 "\n",
                   dev_res->pd_tbl.used);
    monitor_printf(mon, "\tMRs              : %" PRId32 "\n",
                   dev_res->mr_tbl.used);
    monitor_printf(mon, "\tUCs              : %" PRId32 "\n",
                   dev_res->uc_tbl.used);
    monitor_printf(mon, "\tQPs              : %" PRId32 "\n",
                   dev_res->qp_tbl.used);
    monitor_printf(mon, "\tCQs              : %" PRId32 "\n",
                   dev_res->cq_tbl.used);
    monitor_printf(mon, "\tCEQ_CTXs         : %" PRId32 "\n",
                   dev_res->cqe_ctx_tbl.used);
}

static inline void res_tbl_init(const char *name, RdmaRmResTbl *tbl,
                                uint32_t tbl_sz, uint32_t res_sz)
{
    tbl->tbl = g_malloc(tbl_sz * res_sz);

    strncpy(tbl->name, name, MAX_RM_TBL_NAME);
    tbl->name[MAX_RM_TBL_NAME - 1] = 0;

    tbl->bitmap = bitmap_new(tbl_sz);
    tbl->tbl_sz = tbl_sz;
    tbl->res_sz = res_sz;
    tbl->used = 0;
    qemu_mutex_init(&tbl->lock);
}

static inline void res_tbl_free(RdmaRmResTbl *tbl)
{
    if (!tbl->bitmap) {
        return;
    }

    qemu_mutex_destroy(&tbl->lock);
    g_free(tbl->tbl);
    g_free(tbl->bitmap);
}

static inline void *rdma_res_tbl_get(RdmaRmResTbl *tbl, uint32_t handle)
{
    trace_rdma_res_tbl_get(tbl->name, handle);

    if ((handle < tbl->tbl_sz) && (test_bit(handle, tbl->bitmap))) {
        return tbl->tbl + handle * tbl->res_sz;
    }

    rdma_error_report("Table %s, invalid handle %d", tbl->name, handle);

    return NULL;
}

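/*
 * Allocate a slot in a resource table: find the first clear bit in the
 * table's bitmap, mark it used and return a pointer to the zero-initialized
 * entry. Returns NULL when the table is full.
 */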
static inline void *rdma_res_tbl_alloc(RdmaRmResTbl *tbl, uint32_t *handle)
{
    qemu_mutex_lock(&tbl->lock);

    *handle = find_first_zero_bit(tbl->bitmap, tbl->tbl_sz);
    /* find_first_zero_bit() returns tbl_sz when no free bit is left */
    if (*handle >= tbl->tbl_sz) {
        rdma_error_report("Table %s, failed to allocate, bitmap is full",
                          tbl->name);
        qemu_mutex_unlock(&tbl->lock);
        return NULL;
    }

    set_bit(*handle, tbl->bitmap);

    tbl->used++;

    qemu_mutex_unlock(&tbl->lock);

    memset(tbl->tbl + *handle * tbl->res_sz, 0, tbl->res_sz);

    trace_rdma_res_tbl_alloc(tbl->name, *handle);

    return tbl->tbl + *handle * tbl->res_sz;
}

static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
{
    trace_rdma_res_tbl_dealloc(tbl->name, handle);

    qemu_mutex_lock(&tbl->lock);

    if (handle < tbl->tbl_sz) {
        clear_bit(handle, tbl->bitmap);
        tbl->used--;
    }

    qemu_mutex_unlock(&tbl->lock);
}

int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                     uint32_t *pd_handle, uint32_t ctx_handle)
{
    RdmaRmPD *pd;
    int ret = -ENOMEM;

    pd = rdma_res_tbl_alloc(&dev_res->pd_tbl, pd_handle);
    if (!pd) {
        goto out;
    }

    ret = rdma_backend_create_pd(backend_dev, &pd->backend_pd);
    if (ret) {
        ret = -EIO;
        goto out_tbl_dealloc;
    }

    pd->ctx_handle = ctx_handle;

    return 0;

out_tbl_dealloc:
    rdma_res_tbl_dealloc(&dev_res->pd_tbl, *pd_handle);

out:
    return ret;
}

RdmaRmPD *rdma_rm_get_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle)
{
    return rdma_res_tbl_get(&dev_res->pd_tbl, pd_handle);
}

void rdma_rm_dealloc_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle)
{
    RdmaRmPD *pd = rdma_rm_get_pd(dev_res, pd_handle);

    if (pd) {
        rdma_backend_destroy_pd(&pd->backend_pd);
        rdma_res_tbl_dealloc(&dev_res->pd_tbl, pd_handle);
    }
}

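/*
 * Register a guest memory region. host_virt is the page-aligned host mapping
 * of the guest range, so the in-page offset of guest_start is added to get
 * the exact host address. The MR handle doubles as the lkey so the data path
 * can map a key back to its MR.
 */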
int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                     uint64_t guest_start, uint64_t guest_length,
                     void *host_virt, int access_flags, uint32_t *mr_handle,
                     uint32_t *lkey, uint32_t *rkey)
{
    RdmaRmMR *mr;
    int ret = 0;
    RdmaRmPD *pd;

    pd = rdma_rm_get_pd(dev_res, pd_handle);
    if (!pd) {
        return -EINVAL;
    }

    mr = rdma_res_tbl_alloc(&dev_res->mr_tbl, mr_handle);
    if (!mr) {
        return -ENOMEM;
    }
    trace_rdma_rm_alloc_mr(*mr_handle, host_virt, guest_start, guest_length,
                           access_flags);

    if (host_virt) {
        mr->virt = host_virt;
        mr->start = guest_start;
        mr->length = guest_length;
        mr->virt += (mr->start & (TARGET_PAGE_SIZE - 1));

#ifdef LEGACY_RDMA_REG_MR
        ret = rdma_backend_create_mr(&mr->backend_mr, &pd->backend_pd, mr->virt,
                                     mr->length, access_flags);
#else
        ret = rdma_backend_create_mr(&mr->backend_mr, &pd->backend_pd, mr->virt,
                                     mr->length, guest_start, access_flags);
#endif
        if (ret) {
            ret = -EIO;
            goto out_dealloc_mr;
        }
    }

    /* We keep mr_handle in lkey so send and recv get mr ptr */
    *lkey = *mr_handle;
    *rkey = -1;

    mr->pd_handle = pd_handle;

    return 0;

out_dealloc_mr:
    rdma_res_tbl_dealloc(&dev_res->mr_tbl, *mr_handle);

    return ret;
}

RdmaRmMR *rdma_rm_get_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle)
{
    return rdma_res_tbl_get(&dev_res->mr_tbl, mr_handle);
}

void rdma_rm_dealloc_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle)
{
    RdmaRmMR *mr = rdma_rm_get_mr(dev_res, mr_handle);

    if (mr) {
        rdma_backend_destroy_mr(&mr->backend_mr);
        trace_rdma_rm_dealloc_mr(mr_handle, mr->start);
        if (mr->start) {
            mr->virt -= (mr->start & (TARGET_PAGE_SIZE - 1));
            munmap(mr->virt, mr->length);
        }
        rdma_res_tbl_dealloc(&dev_res->mr_tbl, mr_handle);
    }
}

int rdma_rm_alloc_uc(RdmaDeviceResources *dev_res, uint32_t pfn,
                     uint32_t *uc_handle)
{
    RdmaRmUC *uc;

    /* TODO: Need to make sure pfn is between bar start address and
     * bar start address + RDMA_BAR2_UAR_SIZE
    if (pfn > RDMA_BAR2_UAR_SIZE) {
        rdma_error_report("pfn out of range (%d > %d)", pfn,
                          RDMA_BAR2_UAR_SIZE);
        return -ENOMEM;
    }
    */

    uc = rdma_res_tbl_alloc(&dev_res->uc_tbl, uc_handle);
    if (!uc) {
        return -ENOMEM;
    }

    return 0;
}

RdmaRmUC *rdma_rm_get_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle)
{
    return rdma_res_tbl_get(&dev_res->uc_tbl, uc_handle);
}

void rdma_rm_dealloc_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle)
{
    RdmaRmUC *uc = rdma_rm_get_uc(dev_res, uc_handle);

    if (uc) {
        rdma_res_tbl_dealloc(&dev_res->uc_tbl, uc_handle);
    }
}

RdmaRmCQ *rdma_rm_get_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle)
{
    return rdma_res_tbl_get(&dev_res->cq_tbl, cq_handle);
}

int rdma_rm_alloc_cq(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                     uint32_t cqe, uint32_t *cq_handle, void *opaque)
{
    int rc;
    RdmaRmCQ *cq;

    cq = rdma_res_tbl_alloc(&dev_res->cq_tbl, cq_handle);
    if (!cq) {
        return -ENOMEM;
    }

    cq->opaque = opaque;
    cq->notify = CNT_CLEAR;

    rc = rdma_backend_create_cq(backend_dev, &cq->backend_cq, cqe);
    if (rc) {
        rc = -EIO;
        goto out_dealloc_cq;
    }

    return 0;

out_dealloc_cq:
    rdma_rm_dealloc_cq(dev_res, *cq_handle);

    return rc;
}

void rdma_rm_req_notify_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle,
                           bool notify)
{
    RdmaRmCQ *cq;

    cq = rdma_rm_get_cq(dev_res, cq_handle);
    if (!cq) {
        return;
    }

    if (cq->notify != CNT_SET) {
        cq->notify = notify ? CNT_ARM : CNT_CLEAR;
    }
}

void rdma_rm_dealloc_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle)
{
    RdmaRmCQ *cq;

    cq = rdma_rm_get_cq(dev_res, cq_handle);
    if (!cq) {
        return;
    }

    rdma_backend_destroy_cq(&cq->backend_cq);

    rdma_res_tbl_dealloc(&dev_res->cq_tbl, cq_handle);
}

RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn)
{
    GBytes *key = g_bytes_new(&qpn, sizeof(qpn));

    RdmaRmQP *qp = g_hash_table_lookup(dev_res->qp_hash, key);

    g_bytes_unref(key);

    if (!qp) {
        rdma_error_report("Invalid QP handle %d", qpn);
    }

    return qp;
}

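/*
 * QPs are stored in qp_tbl like any other resource, but they are looked up
 * by the QP number assigned by the backend, so each new QP is also inserted
 * into qp_hash keyed on that qpn. GSI QPs force completion notification on
 * both of their CQs.
 */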
int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                     uint8_t qp_type, uint32_t max_send_wr,
                     uint32_t max_send_sge, uint32_t send_cq_handle,
                     uint32_t max_recv_wr, uint32_t max_recv_sge,
                     uint32_t recv_cq_handle, void *opaque, uint32_t *qpn,
                     uint8_t is_srq, uint32_t srq_handle)
{
    int rc;
    RdmaRmQP *qp;
    RdmaRmCQ *scq, *rcq;
    RdmaRmPD *pd;
    RdmaRmSRQ *srq = NULL;
    uint32_t rm_qpn;

    pd = rdma_rm_get_pd(dev_res, pd_handle);
    if (!pd) {
        return -EINVAL;
    }

    scq = rdma_rm_get_cq(dev_res, send_cq_handle);
    rcq = rdma_rm_get_cq(dev_res, recv_cq_handle);

    if (!scq || !rcq) {
        rdma_error_report("Invalid send_cqn or recv_cqn (%d, %d)",
                          send_cq_handle, recv_cq_handle);
        return -EINVAL;
    }

    if (is_srq) {
        srq = rdma_rm_get_srq(dev_res, srq_handle);
        if (!srq) {
            rdma_error_report("Invalid srqn %d", srq_handle);
            return -EINVAL;
        }

        srq->recv_cq_handle = recv_cq_handle;
    }

    if (qp_type == IBV_QPT_GSI) {
        scq->notify = CNT_SET;
        rcq->notify = CNT_SET;
    }

    qp = rdma_res_tbl_alloc(&dev_res->qp_tbl, &rm_qpn);
    if (!qp) {
        return -ENOMEM;
    }

    qp->qpn = rm_qpn;
    qp->qp_state = IBV_QPS_RESET;
    qp->qp_type = qp_type;
    qp->send_cq_handle = send_cq_handle;
    qp->recv_cq_handle = recv_cq_handle;
    qp->opaque = opaque;
    qp->is_srq = is_srq;

    rc = rdma_backend_create_qp(&qp->backend_qp, qp_type, &pd->backend_pd,
                                &scq->backend_cq, &rcq->backend_cq,
                                is_srq ? &srq->backend_srq : NULL,
                                max_send_wr, max_recv_wr, max_send_sge,
                                max_recv_sge);
    if (rc) {
        rc = -EIO;
        goto out_dealloc_qp;
    }

    *qpn = rdma_backend_qpn(&qp->backend_qp);
    trace_rdma_rm_alloc_qp(rm_qpn, *qpn, qp_type);
    g_hash_table_insert(dev_res->qp_hash, g_bytes_new(qpn, sizeof(*qpn)), qp);

    return 0;

out_dealloc_qp:
    rdma_res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn);

    return rc;
}

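/*
 * Apply a guest modify-QP request, driving the backend QP through the
 * RESET -> INIT -> RTR -> RTS transitions selected by attr_mask. The sgid
 * index supplied by the guest is translated to the backend device's gid
 * index before the RTR transition.
 */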
int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                      uint32_t qp_handle, uint32_t attr_mask, uint8_t sgid_idx,
                      union ibv_gid *dgid, uint32_t dqpn,
                      enum ibv_qp_state qp_state, uint32_t qkey,
                      uint32_t rq_psn, uint32_t sq_psn)
{
    RdmaRmQP *qp;
    int ret;

    qp = rdma_rm_get_qp(dev_res, qp_handle);
    if (!qp) {
        return -EINVAL;
    }

    if (qp->qp_type == IBV_QPT_SMI) {
        rdma_error_report("Got QP0 request");
        return -EPERM;
    } else if (qp->qp_type == IBV_QPT_GSI) {
        return 0;
    }

    trace_rdma_rm_modify_qp(qp_handle, attr_mask, qp_state, sgid_idx);

    if (attr_mask & IBV_QP_STATE) {
        qp->qp_state = qp_state;

        if (qp->qp_state == IBV_QPS_INIT) {
            ret = rdma_backend_qp_state_init(backend_dev, &qp->backend_qp,
                                             qp->qp_type, qkey);
            if (ret) {
                return -EIO;
            }
        }

        if (qp->qp_state == IBV_QPS_RTR) {
            /* Get backend gid index */
            sgid_idx = rdma_rm_get_backend_gid_index(dev_res, backend_dev,
                                                     sgid_idx);
            if (sgid_idx <= 0) { /* TODO check also less than bk.max_sgid */
                rdma_error_report("Failed to get bk sgid_idx for sgid_idx %d",
                                  sgid_idx);
                return -EIO;
            }

            ret = rdma_backend_qp_state_rtr(backend_dev, &qp->backend_qp,
                                            qp->qp_type, sgid_idx, dgid, dqpn,
                                            rq_psn, qkey,
                                            attr_mask & IBV_QP_QKEY);
            if (ret) {
                return -EIO;
            }
        }

        if (qp->qp_state == IBV_QPS_RTS) {
            ret = rdma_backend_qp_state_rts(&qp->backend_qp, qp->qp_type,
                                            sq_psn, qkey,
                                            attr_mask & IBV_QP_QKEY);
            if (ret) {
                return -EIO;
            }
        }
    }

    return 0;
}

int rdma_rm_query_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                     uint32_t qp_handle, struct ibv_qp_attr *attr,
                     int attr_mask, struct ibv_qp_init_attr *init_attr)
{
    RdmaRmQP *qp;

    qp = rdma_rm_get_qp(dev_res, qp_handle);
    if (!qp) {
        return -EINVAL;
    }

    return rdma_backend_query_qp(&qp->backend_qp, attr, attr_mask, init_attr);
}

void rdma_rm_dealloc_qp(RdmaDeviceResources *dev_res, uint32_t qp_handle)
{
    RdmaRmQP *qp;
    GBytes *key;

    key = g_bytes_new(&qp_handle, sizeof(qp_handle));
    qp = g_hash_table_lookup(dev_res->qp_hash, key);
    g_hash_table_remove(dev_res->qp_hash, key);
    g_bytes_unref(key);

    if (!qp) {
        return;
    }

    rdma_backend_destroy_qp(&qp->backend_qp, dev_res);

    rdma_res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn);
}

RdmaRmSRQ *rdma_rm_get_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle)
{
    return rdma_res_tbl_get(&dev_res->srq_tbl, srq_handle);
}

int rdma_rm_alloc_srq(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                      uint32_t max_wr, uint32_t max_sge, uint32_t srq_limit,
                      uint32_t *srq_handle, void *opaque)
{
    RdmaRmSRQ *srq;
    RdmaRmPD *pd;
    int rc;

    pd = rdma_rm_get_pd(dev_res, pd_handle);
    if (!pd) {
        return -EINVAL;
    }

    srq = rdma_res_tbl_alloc(&dev_res->srq_tbl, srq_handle);
    if (!srq) {
        return -ENOMEM;
    }

    rc = rdma_backend_create_srq(&srq->backend_srq, &pd->backend_pd,
                                 max_wr, max_sge, srq_limit);
    if (rc) {
        rc = -EIO;
        goto out_dealloc_srq;
    }

    srq->opaque = opaque;

    return 0;

out_dealloc_srq:
    rdma_res_tbl_dealloc(&dev_res->srq_tbl, *srq_handle);

    return rc;
}

int rdma_rm_query_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle,
                      struct ibv_srq_attr *srq_attr)
{
    RdmaRmSRQ *srq;

    srq = rdma_rm_get_srq(dev_res, srq_handle);
    if (!srq) {
        return -EINVAL;
    }

    return rdma_backend_query_srq(&srq->backend_srq, srq_attr);
}

int rdma_rm_modify_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle,
                       struct ibv_srq_attr *srq_attr, int srq_attr_mask)
{
    RdmaRmSRQ *srq;

    srq = rdma_rm_get_srq(dev_res, srq_handle);
    if (!srq) {
        return -EINVAL;
    }

    if ((srq_attr_mask & IBV_SRQ_LIMIT) &&
        (srq_attr->srq_limit == 0)) {
        return -EINVAL;
    }

    if ((srq_attr_mask & IBV_SRQ_MAX_WR) &&
        (srq_attr->max_wr == 0)) {
        return -EINVAL;
    }

    return rdma_backend_modify_srq(&srq->backend_srq, srq_attr,
                                   srq_attr_mask);
}

void rdma_rm_dealloc_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle)
{
    RdmaRmSRQ *srq;

    srq = rdma_rm_get_srq(dev_res, srq_handle);
    if (!srq) {
        return;
    }

    rdma_backend_destroy_srq(&srq->backend_srq, dev_res);
    rdma_res_tbl_dealloc(&dev_res->srq_tbl, srq_handle);
}

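/*
 * The cqe_ctx table holds one opaque context pointer per outstanding work
 * request (it is sized to max_qp * max_qp_wr entries); the allocated id is
 * later resolved back to its context when the completion is processed.
 */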
void *rdma_rm_get_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id)
{
    void **cqe_ctx;

    cqe_ctx = rdma_res_tbl_get(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
    if (!cqe_ctx) {
        return NULL;
    }

    return *cqe_ctx;
}

int rdma_rm_alloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t *cqe_ctx_id,
                          void *ctx)
{
    void **cqe_ctx;

    cqe_ctx = rdma_res_tbl_alloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
    if (!cqe_ctx) {
        return -ENOMEM;
    }

    *cqe_ctx = ctx;

    return 0;
}

void rdma_rm_dealloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id)
{
    rdma_res_tbl_dealloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
}

int rdma_rm_add_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                    const char *ifname, union ibv_gid *gid, int gid_idx)
{
    int rc;

    rc = rdma_backend_add_gid(backend_dev, ifname, gid);
    if (rc) {
        return -EINVAL;
    }

    memcpy(&dev_res->port.gid_tbl[gid_idx].gid, gid, sizeof(*gid));

    return 0;
}

int rdma_rm_del_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                    const char *ifname, int gid_idx)
{
    int rc;

    if (!dev_res->port.gid_tbl[gid_idx].gid.global.interface_id) {
        return 0;
    }

    rc = rdma_backend_del_gid(backend_dev, ifname,
                              &dev_res->port.gid_tbl[gid_idx].gid);
    if (rc) {
        return -EINVAL;
    }

    memset(dev_res->port.gid_tbl[gid_idx].gid.raw, 0,
           sizeof(dev_res->port.gid_tbl[gid_idx].gid));
    dev_res->port.gid_tbl[gid_idx].backend_gid_index = -1;

    return 0;
}

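/*
 * Translate a guest sgid index into the backend device's gid index. The
 * translation is resolved lazily on first use and cached in the port's
 * gid table.
 */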
int rdma_rm_get_backend_gid_index(RdmaDeviceResources *dev_res,
                                  RdmaBackendDev *backend_dev, int sgid_idx)
{
    if (unlikely(sgid_idx < 0 || sgid_idx >= MAX_PORT_GIDS)) {
        rdma_error_report("Got invalid sgid_idx %d", sgid_idx);
        return -EINVAL;
    }

    if (unlikely(dev_res->port.gid_tbl[sgid_idx].backend_gid_index == -1)) {
        dev_res->port.gid_tbl[sgid_idx].backend_gid_index =
            rdma_backend_get_gid_index(backend_dev,
                                       &dev_res->port.gid_tbl[sgid_idx].gid);
    }

    return dev_res->port.gid_tbl[sgid_idx].backend_gid_index;
}

static void destroy_qp_hash_key(gpointer data)
{
    g_bytes_unref(data);
}

static void init_ports(RdmaDeviceResources *dev_res)
{
    int i;

    memset(&dev_res->port, 0, sizeof(dev_res->port));

    dev_res->port.state = IBV_PORT_DOWN;
    for (i = 0; i < MAX_PORT_GIDS; i++) {
        dev_res->port.gid_tbl[i].backend_gid_index = -1;
    }
}

static void fini_ports(RdmaDeviceResources *dev_res,
                       RdmaBackendDev *backend_dev, const char *ifname)
{
    int i;

    dev_res->port.state = IBV_PORT_DOWN;
    for (i = 0; i < MAX_PORT_GIDS; i++) {
        rdma_rm_del_gid(dev_res, backend_dev, ifname, i);
    }
}

int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr)
{
    dev_res->qp_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal,
                                             destroy_qp_hash_key, NULL);
    if (!dev_res->qp_hash) {
        return -ENOMEM;
    }

    res_tbl_init("PD", &dev_res->pd_tbl, dev_attr->max_pd, sizeof(RdmaRmPD));
    res_tbl_init("CQ", &dev_res->cq_tbl, dev_attr->max_cq, sizeof(RdmaRmCQ));
    res_tbl_init("MR", &dev_res->mr_tbl, dev_attr->max_mr, sizeof(RdmaRmMR));
    res_tbl_init("QP", &dev_res->qp_tbl, dev_attr->max_qp, sizeof(RdmaRmQP));
    res_tbl_init("CQE_CTX", &dev_res->cqe_ctx_tbl, dev_attr->max_qp *
                 dev_attr->max_qp_wr, sizeof(void *));
    res_tbl_init("UC", &dev_res->uc_tbl, MAX_UCS, sizeof(RdmaRmUC));
    res_tbl_init("SRQ", &dev_res->srq_tbl, dev_attr->max_srq,
                 sizeof(RdmaRmSRQ));

    init_ports(dev_res);

    qemu_mutex_init(&dev_res->lock);

    memset(&dev_res->stats, 0, sizeof(dev_res->stats));
    atomic_set(&dev_res->stats.missing_cqe, 0);

    return 0;
}

void rdma_rm_fini(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                  const char *ifname)
{
    qemu_mutex_destroy(&dev_res->lock);

    fini_ports(dev_res, backend_dev, ifname);

    res_tbl_free(&dev_res->srq_tbl);
    res_tbl_free(&dev_res->uc_tbl);
    res_tbl_free(&dev_res->cqe_ctx_tbl);
    res_tbl_free(&dev_res->qp_tbl);
    res_tbl_free(&dev_res->mr_tbl);
    res_tbl_free(&dev_res->cq_tbl);
    res_tbl_free(&dev_res->pd_tbl);

    if (dev_res->qp_hash) {
        g_hash_table_destroy(dev_res->qp_hash);
    }
}