/*
 * QEMU paravirtual RDMA - Resource Manager Implementation
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "monitor/monitor.h"

#include "trace.h"
#include "rdma_utils.h"
#include "rdma_backend.h"
#include "rdma_rm.h"

/* Page directory and page tables */
#define PG_DIR_SZ { TARGET_PAGE_SIZE / sizeof(__u64) }
#define PG_TBL_SZ { TARGET_PAGE_SIZE / sizeof(__u64) }

void rdma_format_device_counters(RdmaDeviceResources *dev_res, GString *buf)
{
    g_string_append_printf(buf, "\ttx               : %" PRId64 "\n",
                           dev_res->stats.tx);
    g_string_append_printf(buf, "\ttx_len           : %" PRId64 "\n",
                           dev_res->stats.tx_len);
    g_string_append_printf(buf, "\ttx_err           : %" PRId64 "\n",
                           dev_res->stats.tx_err);
    g_string_append_printf(buf, "\trx_bufs          : %" PRId64 "\n",
                           dev_res->stats.rx_bufs);
    g_string_append_printf(buf, "\trx_srq           : %" PRId64 "\n",
                           dev_res->stats.rx_srq);
    g_string_append_printf(buf, "\trx_bufs_len      : %" PRId64 "\n",
                           dev_res->stats.rx_bufs_len);
    g_string_append_printf(buf, "\trx_bufs_err      : %" PRId64 "\n",
                           dev_res->stats.rx_bufs_err);
    g_string_append_printf(buf, "\tcomps            : %" PRId64 "\n",
                           dev_res->stats.completions);
    g_string_append_printf(buf, "\tmissing_comps    : %" PRId32 "\n",
                           dev_res->stats.missing_cqe);
    g_string_append_printf(buf, "\tpoll_cq (bk)     : %" PRId64 "\n",
                           dev_res->stats.poll_cq_from_bk);
    g_string_append_printf(buf, "\tpoll_cq_ppoll_to : %" PRId64 "\n",
                           dev_res->stats.poll_cq_ppoll_to);
    g_string_append_printf(buf, "\tpoll_cq (fe)     : %" PRId64 "\n",
                           dev_res->stats.poll_cq_from_guest);
    g_string_append_printf(buf, "\tpoll_cq_empty    : %" PRId64 "\n",
                           dev_res->stats.poll_cq_from_guest_empty);
    g_string_append_printf(buf, "\tmad_tx           : %" PRId64 "\n",
                           dev_res->stats.mad_tx);
    g_string_append_printf(buf, "\tmad_tx_err       : %" PRId64 "\n",
                           dev_res->stats.mad_tx_err);
    g_string_append_printf(buf, "\tmad_rx           : %" PRId64 "\n",
                           dev_res->stats.mad_rx);
    g_string_append_printf(buf, "\tmad_rx_err       : %" PRId64 "\n",
                           dev_res->stats.mad_rx_err);
    g_string_append_printf(buf, "\tmad_rx_bufs      : %" PRId64 "\n",
                           dev_res->stats.mad_rx_bufs);
    g_string_append_printf(buf, "\tmad_rx_bufs_err  : %" PRId64 "\n",
                           dev_res->stats.mad_rx_bufs_err);
    g_string_append_printf(buf, "\tPDs              : %" PRId32 "\n",
                           dev_res->pd_tbl.used);
    g_string_append_printf(buf, "\tMRs              : %" PRId32 "\n",
                           dev_res->mr_tbl.used);
    g_string_append_printf(buf, "\tUCs              : %" PRId32 "\n",
                           dev_res->uc_tbl.used);
    g_string_append_printf(buf, "\tQPs              : %" PRId32 "\n",
                           dev_res->qp_tbl.used);
    g_string_append_printf(buf, "\tCQs              : %" PRId32 "\n",
                           dev_res->cq_tbl.used);
    g_string_append_printf(buf, "\tCEQ_CTXs         : %" PRId32 "\n",
                           dev_res->cqe_ctx_tbl.used);
}

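/*
 * Resource tables: each table is a flat array of fixed-size entries plus an
 * allocation bitmap and a mutex. A resource handle is simply the entry's
 * index in the array.
 */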
static inline void res_tbl_init(const char *name, RdmaRmResTbl *tbl,
                                uint32_t tbl_sz, uint32_t res_sz)
{
    tbl->tbl = g_malloc(tbl_sz * res_sz);

    strncpy(tbl->name, name, MAX_RM_TBL_NAME);
    tbl->name[MAX_RM_TBL_NAME - 1] = 0;

    tbl->bitmap = bitmap_new(tbl_sz);
    tbl->tbl_sz = tbl_sz;
    tbl->res_sz = res_sz;
    tbl->used = 0;
    qemu_mutex_init(&tbl->lock);
}

static inline void res_tbl_free(RdmaRmResTbl *tbl)
{
    if (!tbl->bitmap) {
        return;
    }

    qemu_mutex_destroy(&tbl->lock);
    g_free(tbl->tbl);
    g_free(tbl->bitmap);
}

static inline void *rdma_res_tbl_get(RdmaRmResTbl *tbl, uint32_t handle)
{
    trace_rdma_res_tbl_get(tbl->name, handle);

    if ((handle < tbl->tbl_sz) && (test_bit(handle, tbl->bitmap))) {
        return tbl->tbl + handle * tbl->res_sz;
    }

    rdma_error_report("Table %s, invalid handle %d", tbl->name, handle);

    return NULL;
}

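/*
 * Allocate a new entry: the handle is the first clear bit in the table's
 * bitmap. The entry is zeroed before it is handed back to the caller.
 */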
static inline void *rdma_res_tbl_alloc(RdmaRmResTbl *tbl, uint32_t *handle)
{
    qemu_mutex_lock(&tbl->lock);

    *handle = find_first_zero_bit(tbl->bitmap, tbl->tbl_sz);
    if (*handle >= tbl->tbl_sz) {
        rdma_error_report("Table %s, failed to allocate, bitmap is full",
                          tbl->name);
        qemu_mutex_unlock(&tbl->lock);
        return NULL;
    }

    set_bit(*handle, tbl->bitmap);

    tbl->used++;

    qemu_mutex_unlock(&tbl->lock);

    memset(tbl->tbl + *handle * tbl->res_sz, 0, tbl->res_sz);

    trace_rdma_res_tbl_alloc(tbl->name, *handle);

    return tbl->tbl + *handle * tbl->res_sz;
}

static inline void rdma_res_tbl_dealloc(RdmaRmResTbl *tbl, uint32_t handle)
{
    trace_rdma_res_tbl_dealloc(tbl->name, handle);

    QEMU_LOCK_GUARD(&tbl->lock);

    if (handle < tbl->tbl_sz) {
        clear_bit(handle, tbl->bitmap);
        tbl->used--;
    }
}

int rdma_rm_alloc_pd(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                     uint32_t *pd_handle, uint32_t ctx_handle)
{
    RdmaRmPD *pd;
    int ret = -ENOMEM;

    pd = rdma_res_tbl_alloc(&dev_res->pd_tbl, pd_handle);
    if (!pd) {
        goto out;
    }

    ret = rdma_backend_create_pd(backend_dev, &pd->backend_pd);
    if (ret) {
        ret = -EIO;
        goto out_tbl_dealloc;
    }

    pd->ctx_handle = ctx_handle;

    return 0;

out_tbl_dealloc:
    rdma_res_tbl_dealloc(&dev_res->pd_tbl, *pd_handle);

out:
    return ret;
}

RdmaRmPD *rdma_rm_get_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle)
{
    return rdma_res_tbl_get(&dev_res->pd_tbl, pd_handle);
}

void rdma_rm_dealloc_pd(RdmaDeviceResources *dev_res, uint32_t pd_handle)
{
    RdmaRmPD *pd = rdma_rm_get_pd(dev_res, pd_handle);

    if (pd) {
        rdma_backend_destroy_pd(&pd->backend_pd);
        rdma_res_tbl_dealloc(&dev_res->pd_tbl, pd_handle);
    }
}

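/*
 * Register a memory region. host_virt points to the mapped guest memory;
 * the offset of guest_start within its page is added so the backend MR
 * covers the exact guest range.
 */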
int rdma_rm_alloc_mr(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                     uint64_t guest_start, uint64_t guest_length,
                     void *host_virt, int access_flags, uint32_t *mr_handle,
                     uint32_t *lkey, uint32_t *rkey)
{
    RdmaRmMR *mr;
    int ret = 0;
    RdmaRmPD *pd;

    pd = rdma_rm_get_pd(dev_res, pd_handle);
    if (!pd) {
        return -EINVAL;
    }

    mr = rdma_res_tbl_alloc(&dev_res->mr_tbl, mr_handle);
    if (!mr) {
        return -ENOMEM;
    }
    trace_rdma_rm_alloc_mr(*mr_handle, host_virt, guest_start, guest_length,
                           access_flags);

    if (host_virt) {
        mr->virt = host_virt;
        mr->start = guest_start;
        mr->length = guest_length;
        mr->virt += (mr->start & (TARGET_PAGE_SIZE - 1));

        ret = rdma_backend_create_mr(&mr->backend_mr, &pd->backend_pd, mr->virt,
                                     mr->length, guest_start, access_flags);
        if (ret) {
            ret = -EIO;
            goto out_dealloc_mr;
        }
#ifdef LEGACY_RDMA_REG_MR
        /* We keep mr_handle in lkey so send and recv can get the MR pointer */
        *lkey = *mr_handle;
#else
        *lkey = rdma_backend_mr_lkey(&mr->backend_mr);
        *rkey = rdma_backend_mr_rkey(&mr->backend_mr);
#endif
    }

    mr->pd_handle = pd_handle;

    return 0;

out_dealloc_mr:
    rdma_res_tbl_dealloc(&dev_res->mr_tbl, *mr_handle);

    return ret;
}

RdmaRmMR *rdma_rm_get_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle)
{
    return rdma_res_tbl_get(&dev_res->mr_tbl, mr_handle);
}

void rdma_rm_dealloc_mr(RdmaDeviceResources *dev_res, uint32_t mr_handle)
{
    RdmaRmMR *mr = rdma_rm_get_mr(dev_res, mr_handle);

    if (mr) {
        rdma_backend_destroy_mr(&mr->backend_mr);
        trace_rdma_rm_dealloc_mr(mr_handle, mr->start);
        if (mr->start) {
            mr->virt -= (mr->start & (TARGET_PAGE_SIZE - 1));
            munmap(mr->virt, mr->length);
        }
        rdma_res_tbl_dealloc(&dev_res->mr_tbl, mr_handle);
    }
}

int rdma_rm_alloc_uc(RdmaDeviceResources *dev_res, uint32_t pfn,
                     uint32_t *uc_handle)
{
    RdmaRmUC *uc;

    /* TODO: Need to make sure pfn is between bar start address and
     * bar start address + RDMA_BAR2_UAR_SIZE
    if (pfn > RDMA_BAR2_UAR_SIZE) {
        rdma_error_report("pfn out of range (%d > %d)", pfn,
                          RDMA_BAR2_UAR_SIZE);
        return -ENOMEM;
    }
    */

    uc = rdma_res_tbl_alloc(&dev_res->uc_tbl, uc_handle);
    if (!uc) {
        return -ENOMEM;
    }

    return 0;
}

RdmaRmUC *rdma_rm_get_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle)
{
    return rdma_res_tbl_get(&dev_res->uc_tbl, uc_handle);
}

void rdma_rm_dealloc_uc(RdmaDeviceResources *dev_res, uint32_t uc_handle)
{
    RdmaRmUC *uc = rdma_rm_get_uc(dev_res, uc_handle);

    if (uc) {
        rdma_res_tbl_dealloc(&dev_res->uc_tbl, uc_handle);
    }
}

RdmaRmCQ *rdma_rm_get_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle)
{
    return rdma_res_tbl_get(&dev_res->cq_tbl, cq_handle);
}

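/*
 * Create a CQ. Notification starts disabled (CNT_CLEAR);
 * rdma_rm_req_notify_cq arms it (CNT_ARM) on guest request unless it is
 * permanently set (CNT_SET), as is done for the GSI QP's CQs.
 */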
int rdma_rm_alloc_cq(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                     uint32_t cqe, uint32_t *cq_handle, void *opaque)
{
    int rc;
    RdmaRmCQ *cq;

    cq = rdma_res_tbl_alloc(&dev_res->cq_tbl, cq_handle);
    if (!cq) {
        return -ENOMEM;
    }

    cq->opaque = opaque;
    cq->notify = CNT_CLEAR;

    rc = rdma_backend_create_cq(backend_dev, &cq->backend_cq, cqe);
    if (rc) {
        rc = -EIO;
        goto out_dealloc_cq;
    }

    return 0;

out_dealloc_cq:
    rdma_rm_dealloc_cq(dev_res, *cq_handle);

    return rc;
}

void rdma_rm_req_notify_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle,
                           bool notify)
{
    RdmaRmCQ *cq;

    cq = rdma_rm_get_cq(dev_res, cq_handle);
    if (!cq) {
        return;
    }

    if (cq->notify != CNT_SET) {
        cq->notify = notify ? CNT_ARM : CNT_CLEAR;
    }
}

void rdma_rm_dealloc_cq(RdmaDeviceResources *dev_res, uint32_t cq_handle)
{
    RdmaRmCQ *cq;

    cq = rdma_rm_get_cq(dev_res, cq_handle);
    if (!cq) {
        return;
    }

    rdma_backend_destroy_cq(&cq->backend_cq);

    rdma_res_tbl_dealloc(&dev_res->cq_tbl, cq_handle);
}

RdmaRmQP *rdma_rm_get_qp(RdmaDeviceResources *dev_res, uint32_t qpn)
{
    GBytes *key = g_bytes_new(&qpn, sizeof(qpn));

    RdmaRmQP *qp = g_hash_table_lookup(dev_res->qp_hash, key);

    g_bytes_unref(key);

    if (!qp) {
        rdma_error_report("Invalid QP handle %d", qpn);
    }

    return qp;
}

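/*
 * Create a QP. The QP is allocated in qp_tbl under an internal handle
 * (rm_qpn) but is looked up by the backend QP number, so it is also inserted
 * into qp_hash keyed by *qpn. For the GSI QP both CQs are permanently armed
 * (CNT_SET).
 */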
int rdma_rm_alloc_qp(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                     uint8_t qp_type, uint32_t max_send_wr,
                     uint32_t max_send_sge, uint32_t send_cq_handle,
                     uint32_t max_recv_wr, uint32_t max_recv_sge,
                     uint32_t recv_cq_handle, void *opaque, uint32_t *qpn,
                     uint8_t is_srq, uint32_t srq_handle)
{
    int rc;
    RdmaRmQP *qp;
    RdmaRmCQ *scq, *rcq;
    RdmaRmPD *pd;
    RdmaRmSRQ *srq = NULL;
    uint32_t rm_qpn;

    pd = rdma_rm_get_pd(dev_res, pd_handle);
    if (!pd) {
        return -EINVAL;
    }

    scq = rdma_rm_get_cq(dev_res, send_cq_handle);
    rcq = rdma_rm_get_cq(dev_res, recv_cq_handle);

    if (!scq || !rcq) {
        rdma_error_report("Invalid send_cqn or recv_cqn (%d, %d)",
                          send_cq_handle, recv_cq_handle);
        return -EINVAL;
    }

    if (is_srq) {
        srq = rdma_rm_get_srq(dev_res, srq_handle);
        if (!srq) {
            rdma_error_report("Invalid srqn %d", srq_handle);
            return -EINVAL;
        }

        srq->recv_cq_handle = recv_cq_handle;
    }

    if (qp_type == IBV_QPT_GSI) {
        scq->notify = CNT_SET;
        rcq->notify = CNT_SET;
    }

    qp = rdma_res_tbl_alloc(&dev_res->qp_tbl, &rm_qpn);
    if (!qp) {
        return -ENOMEM;
    }

    qp->qpn = rm_qpn;
    qp->qp_state = IBV_QPS_RESET;
    qp->qp_type = qp_type;
    qp->send_cq_handle = send_cq_handle;
    qp->recv_cq_handle = recv_cq_handle;
    qp->opaque = opaque;
    qp->is_srq = is_srq;

    rc = rdma_backend_create_qp(&qp->backend_qp, qp_type, &pd->backend_pd,
                                &scq->backend_cq, &rcq->backend_cq,
                                is_srq ? &srq->backend_srq : NULL,
                                max_send_wr, max_recv_wr, max_send_sge,
                                max_recv_sge);
    if (rc) {
        rc = -EIO;
        goto out_dealloc_qp;
    }

    *qpn = rdma_backend_qpn(&qp->backend_qp);
    trace_rdma_rm_alloc_qp(rm_qpn, *qpn, qp_type);
    g_hash_table_insert(dev_res->qp_hash, g_bytes_new(qpn, sizeof(*qpn)), qp);

    return 0;

out_dealloc_qp:
    rdma_res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn);

    return rc;
}

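/*
 * Modify a QP: when IBV_QP_STATE is set, walk the backend QP through the
 * INIT/RTR/RTS transitions requested by the guest. QP0 (SMI) requests are
 * rejected; the GSI QP needs no backend state change here.
 */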
int rdma_rm_modify_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                      uint32_t qp_handle, uint32_t attr_mask, uint8_t sgid_idx,
                      union ibv_gid *dgid, uint32_t dqpn,
                      enum ibv_qp_state qp_state, uint32_t qkey,
                      uint32_t rq_psn, uint32_t sq_psn)
{
    RdmaRmQP *qp;
    int ret;

    qp = rdma_rm_get_qp(dev_res, qp_handle);
    if (!qp) {
        return -EINVAL;
    }

    if (qp->qp_type == IBV_QPT_SMI) {
        rdma_error_report("Got QP0 request");
        return -EPERM;
    } else if (qp->qp_type == IBV_QPT_GSI) {
        return 0;
    }

    trace_rdma_rm_modify_qp(qp_handle, attr_mask, qp_state, sgid_idx);

    if (attr_mask & IBV_QP_STATE) {
        qp->qp_state = qp_state;

        if (qp->qp_state == IBV_QPS_INIT) {
            ret = rdma_backend_qp_state_init(backend_dev, &qp->backend_qp,
                                             qp->qp_type, qkey);
            if (ret) {
                return -EIO;
            }
        }

        if (qp->qp_state == IBV_QPS_RTR) {
            /* Get backend gid index */
            sgid_idx = rdma_rm_get_backend_gid_index(dev_res, backend_dev,
                                                     sgid_idx);
            if (sgid_idx <= 0) { /* TODO check also less than bk.max_sgid */
                rdma_error_report("Failed to get bk sgid_idx for sgid_idx %d",
                                  sgid_idx);
                return -EIO;
            }

            ret = rdma_backend_qp_state_rtr(backend_dev, &qp->backend_qp,
                                            qp->qp_type, sgid_idx, dgid, dqpn,
                                            rq_psn, qkey,
                                            attr_mask & IBV_QP_QKEY);
            if (ret) {
                return -EIO;
            }
        }

        if (qp->qp_state == IBV_QPS_RTS) {
            ret = rdma_backend_qp_state_rts(&qp->backend_qp, qp->qp_type,
                                            sq_psn, qkey,
                                            attr_mask & IBV_QP_QKEY);
            if (ret) {
                return -EIO;
            }
        }
    }

    return 0;
}

int rdma_rm_query_qp(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                     uint32_t qp_handle, struct ibv_qp_attr *attr,
                     int attr_mask, struct ibv_qp_init_attr *init_attr)
{
    RdmaRmQP *qp;

    qp = rdma_rm_get_qp(dev_res, qp_handle);
    if (!qp) {
        return -EINVAL;
    }

    return rdma_backend_query_qp(&qp->backend_qp, attr, attr_mask, init_attr);
}

void rdma_rm_dealloc_qp(RdmaDeviceResources *dev_res, uint32_t qp_handle)
{
    RdmaRmQP *qp;
    GBytes *key;

    key = g_bytes_new(&qp_handle, sizeof(qp_handle));
    qp = g_hash_table_lookup(dev_res->qp_hash, key);
    g_hash_table_remove(dev_res->qp_hash, key);
    g_bytes_unref(key);

    if (!qp) {
        return;
    }

    rdma_backend_destroy_qp(&qp->backend_qp, dev_res);

    rdma_res_tbl_dealloc(&dev_res->qp_tbl, qp->qpn);
}

RdmaRmSRQ *rdma_rm_get_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle)
{
    return rdma_res_tbl_get(&dev_res->srq_tbl, srq_handle);
}

int rdma_rm_alloc_srq(RdmaDeviceResources *dev_res, uint32_t pd_handle,
                      uint32_t max_wr, uint32_t max_sge, uint32_t srq_limit,
                      uint32_t *srq_handle, void *opaque)
{
    RdmaRmSRQ *srq;
    RdmaRmPD *pd;
    int rc;

    pd = rdma_rm_get_pd(dev_res, pd_handle);
    if (!pd) {
        return -EINVAL;
    }

    srq = rdma_res_tbl_alloc(&dev_res->srq_tbl, srq_handle);
    if (!srq) {
        return -ENOMEM;
    }

    rc = rdma_backend_create_srq(&srq->backend_srq, &pd->backend_pd,
                                 max_wr, max_sge, srq_limit);
    if (rc) {
        rc = -EIO;
        goto out_dealloc_srq;
    }

    srq->opaque = opaque;

    return 0;

out_dealloc_srq:
    rdma_res_tbl_dealloc(&dev_res->srq_tbl, *srq_handle);

    return rc;
}

int rdma_rm_query_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle,
                      struct ibv_srq_attr *srq_attr)
{
    RdmaRmSRQ *srq;

    srq = rdma_rm_get_srq(dev_res, srq_handle);
    if (!srq) {
        return -EINVAL;
    }

    return rdma_backend_query_srq(&srq->backend_srq, srq_attr);
}

int rdma_rm_modify_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle,
                       struct ibv_srq_attr *srq_attr, int srq_attr_mask)
{
    RdmaRmSRQ *srq;

    srq = rdma_rm_get_srq(dev_res, srq_handle);
    if (!srq) {
        return -EINVAL;
    }

    if ((srq_attr_mask & IBV_SRQ_LIMIT) &&
        (srq_attr->srq_limit == 0)) {
        return -EINVAL;
    }

    if ((srq_attr_mask & IBV_SRQ_MAX_WR) &&
        (srq_attr->max_wr == 0)) {
        return -EINVAL;
    }

    return rdma_backend_modify_srq(&srq->backend_srq, srq_attr,
                                   srq_attr_mask);
}

void rdma_rm_dealloc_srq(RdmaDeviceResources *dev_res, uint32_t srq_handle)
{
    RdmaRmSRQ *srq;

    srq = rdma_rm_get_srq(dev_res, srq_handle);
    if (!srq) {
        return;
    }

    rdma_backend_destroy_srq(&srq->backend_srq, dev_res);
    rdma_res_tbl_dealloc(&dev_res->srq_tbl, srq_handle);
}

void *rdma_rm_get_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id)
{
    void **cqe_ctx;

    cqe_ctx = rdma_res_tbl_get(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
    if (!cqe_ctx) {
        return NULL;
    }

    return *cqe_ctx;
}

int rdma_rm_alloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t *cqe_ctx_id,
                          void *ctx)
{
    void **cqe_ctx;

    cqe_ctx = rdma_res_tbl_alloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
    if (!cqe_ctx) {
        return -ENOMEM;
    }

    *cqe_ctx = ctx;

    return 0;
}

void rdma_rm_dealloc_cqe_ctx(RdmaDeviceResources *dev_res, uint32_t cqe_ctx_id)
{
    rdma_res_tbl_dealloc(&dev_res->cqe_ctx_tbl, cqe_ctx_id);
}

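/*
 * GID management: each gid is programmed into the backend device and
 * mirrored in the emulated port's gid table so it can be removed and
 * translated to a backend index later.
 */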
int rdma_rm_add_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                    const char *ifname, union ibv_gid *gid, int gid_idx)
{
    int rc;

    rc = rdma_backend_add_gid(backend_dev, ifname, gid);
    if (rc) {
        return -EINVAL;
    }

    memcpy(&dev_res->port.gid_tbl[gid_idx].gid, gid, sizeof(*gid));

    return 0;
}

int rdma_rm_del_gid(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                    const char *ifname, int gid_idx)
{
    int rc;

    if (!dev_res->port.gid_tbl[gid_idx].gid.global.interface_id) {
        return 0;
    }

    rc = rdma_backend_del_gid(backend_dev, ifname,
                              &dev_res->port.gid_tbl[gid_idx].gid);
    if (rc) {
        return -EINVAL;
    }

    memset(dev_res->port.gid_tbl[gid_idx].gid.raw, 0,
           sizeof(dev_res->port.gid_tbl[gid_idx].gid));
    dev_res->port.gid_tbl[gid_idx].backend_gid_index = -1;

    return 0;
}

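/*
 * Translate a guest sgid index to the backend device's gid index. The result
 * is cached in the port's gid table and resolved lazily on first use.
 */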
int rdma_rm_get_backend_gid_index(RdmaDeviceResources *dev_res,
                                  RdmaBackendDev *backend_dev, int sgid_idx)
{
    if (unlikely(sgid_idx < 0 || sgid_idx >= MAX_PORT_GIDS)) {
        rdma_error_report("Got invalid sgid_idx %d", sgid_idx);
        return -EINVAL;
    }

    if (unlikely(dev_res->port.gid_tbl[sgid_idx].backend_gid_index == -1)) {
        dev_res->port.gid_tbl[sgid_idx].backend_gid_index =
            rdma_backend_get_gid_index(backend_dev,
                                       &dev_res->port.gid_tbl[sgid_idx].gid);
    }

    return dev_res->port.gid_tbl[sgid_idx].backend_gid_index;
}

static void destroy_qp_hash_key(gpointer data)
{
    g_bytes_unref(data);
}

static void init_ports(RdmaDeviceResources *dev_res)
{
    int i;

    memset(&dev_res->port, 0, sizeof(dev_res->port));

    dev_res->port.state = IBV_PORT_DOWN;
    for (i = 0; i < MAX_PORT_GIDS; i++) {
        dev_res->port.gid_tbl[i].backend_gid_index = -1;
    }
}

static void fini_ports(RdmaDeviceResources *dev_res,
                       RdmaBackendDev *backend_dev, const char *ifname)
{
    int i;

    dev_res->port.state = IBV_PORT_DOWN;
    for (i = 0; i < MAX_PORT_GIDS; i++) {
        rdma_rm_del_gid(dev_res, backend_dev, ifname, i);
    }
}

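/*
 * Initialize all resource tables, sized from the backend device attributes,
 * and the qpn -> QP hash used for lookups by backend QP number.
 */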
int rdma_rm_init(RdmaDeviceResources *dev_res, struct ibv_device_attr *dev_attr)
{
    dev_res->qp_hash = g_hash_table_new_full(g_bytes_hash, g_bytes_equal,
                                             destroy_qp_hash_key, NULL);
    if (!dev_res->qp_hash) {
        return -ENOMEM;
    }

    res_tbl_init("PD", &dev_res->pd_tbl, dev_attr->max_pd, sizeof(RdmaRmPD));
    res_tbl_init("CQ", &dev_res->cq_tbl, dev_attr->max_cq, sizeof(RdmaRmCQ));
    res_tbl_init("MR", &dev_res->mr_tbl, dev_attr->max_mr, sizeof(RdmaRmMR));
    res_tbl_init("QP", &dev_res->qp_tbl, dev_attr->max_qp, sizeof(RdmaRmQP));
    res_tbl_init("CQE_CTX", &dev_res->cqe_ctx_tbl, dev_attr->max_qp *
                       dev_attr->max_qp_wr, sizeof(void *));
    res_tbl_init("UC", &dev_res->uc_tbl, MAX_UCS, sizeof(RdmaRmUC));
    res_tbl_init("SRQ", &dev_res->srq_tbl, dev_attr->max_srq,
                 sizeof(RdmaRmSRQ));

    init_ports(dev_res);

    qemu_mutex_init(&dev_res->lock);

    memset(&dev_res->stats, 0, sizeof(dev_res->stats));
    qatomic_set(&dev_res->stats.missing_cqe, 0);

    return 0;
}

void rdma_rm_fini(RdmaDeviceResources *dev_res, RdmaBackendDev *backend_dev,
                  const char *ifname)
{
    qemu_mutex_destroy(&dev_res->lock);

    fini_ports(dev_res, backend_dev, ifname);

    res_tbl_free(&dev_res->srq_tbl);
    res_tbl_free(&dev_res->uc_tbl);
    res_tbl_free(&dev_res->cqe_ctx_tbl);
    res_tbl_free(&dev_res->qp_tbl);
    res_tbl_free(&dev_res->mr_tbl);
    res_tbl_free(&dev_res->cq_tbl);
    res_tbl_free(&dev_res->pd_tbl);

    if (dev_res->qp_hash) {
        g_hash_table_destroy(dev_res->qp_hash);
    }
}