/*
 * QEMU paravirtual RDMA - Command channel
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "hw/pci/pci.h"
#include "hw/pci/pci_ids.h"

#include "../rdma_backend.h"
#include "../rdma_rm.h"
#include "../rdma_utils.h"

#include "trace.h"
#include "pvrdma.h"
#include "standard-headers/rdma/vmw_pvrdma-abi.h"

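/*
 * Guest rings and memory regions are described by a two-level page
 * directory: a directory page of DMA addresses pointing to page-table
 * pages, which in turn point to the data pages. pvrdma_map_to_pdir()
 * walks that structure and mremap()s the guest pages into one contiguous
 * host virtual range.
 */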
static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma,
                                uint32_t nchunks, size_t length)
{
    uint64_t *dir, *tbl;
    int tbl_idx, dir_idx, addr_idx;
    void *host_virt = NULL, *curr_page;

    if (!nchunks) {
        rdma_error_report("Got nchunks=0");
        return NULL;
    }

    dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to page directory");
        return NULL;
    }

    tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to page table 0");
        goto out_unmap_dir;
    }

    curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE);
    if (!curr_page) {
        rdma_error_report("Failed to map the page 0");
        goto out_unmap_tbl;
    }

    host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE);
    if (host_virt == MAP_FAILED) {
        host_virt = NULL;
        rdma_error_report("Failed to remap memory for host_virt");
        goto out_unmap_tbl;
    }
    trace_pvrdma_map_to_pdir_host_virt(curr_page, host_virt);

    rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

    dir_idx = 0;
    tbl_idx = 1;
    addr_idx = 1;
    while (addr_idx < nchunks) {
        if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) {
            tbl_idx = 0;
            dir_idx++;
            rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
            tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE);
            if (!tbl) {
                rdma_error_report("Failed to map to page table %d", dir_idx);
                goto out_unmap_host_virt;
            }
        }

        curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx],
                                     TARGET_PAGE_SIZE);
        if (!curr_page) {
            rdma_error_report("Failed to map to page %d, dir %d", tbl_idx,
                              dir_idx);
            goto out_unmap_host_virt;
        }

        mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED,
               host_virt + TARGET_PAGE_SIZE * addr_idx);

        trace_pvrdma_map_to_pdir_next_page(addr_idx, curr_page, host_virt +
                                           TARGET_PAGE_SIZE * addr_idx);

        rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

        addr_idx++;
        tbl_idx++;
    }

    goto out_unmap_tbl;

out_unmap_host_virt:
    munmap(host_virt, length);
    host_virt = NULL;

out_unmap_tbl:
    rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);

out_unmap_dir:
    rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE);

    return host_virt;
}

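/*
 * Command handlers: each one decodes its member of the request union,
 * fills the matching member of the response union and returns 0 on
 * success or a negative errno value on failure.
 */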
static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_port *cmd = &req->query_port;
    struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp;
    struct pvrdma_port_attr attrs = {};

    if (cmd->port_num > MAX_PORTS) {
        return -EINVAL;
    }

    if (rdma_backend_query_port(&dev->backend_dev,
                                (struct ibv_port_attr *)&attrs)) {
        return -ENOMEM;
    }

    memset(resp, 0, sizeof(*resp));

    resp->attrs.state = dev->func0->device_active ? attrs.state :
                                                    PVRDMA_PORT_DOWN;
    resp->attrs.max_mtu = attrs.max_mtu;
    resp->attrs.active_mtu = attrs.active_mtu;
    resp->attrs.phys_state = attrs.phys_state;
    resp->attrs.gid_tbl_len = MIN(MAX_PORT_GIDS, attrs.gid_tbl_len);
    resp->attrs.max_msg_sz = 1024;
    resp->attrs.pkey_tbl_len = MIN(MAX_PORT_PKEYS, attrs.pkey_tbl_len);
    resp->attrs.active_width = 1;
    resp->attrs.active_speed = 1;

    return 0;
}

static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_pkey *cmd = &req->query_pkey;
    struct pvrdma_cmd_query_pkey_resp *resp = &rsp->query_pkey_resp;

    if (cmd->port_num > MAX_PORTS) {
        return -EINVAL;
    }

    if (cmd->index > MAX_PKEYS) {
        return -EINVAL;
    }

    memset(resp, 0, sizeof(*resp));

    resp->pkey = PVRDMA_PKEY;

    return 0;
}

static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_pd *cmd = &req->create_pd;
    struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp;
    int rc;

    memset(resp, 0, sizeof(*resp));
    rc = rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev,
                          &resp->pd_handle, cmd->ctx_handle);

    return rc;
}

static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_pd *cmd = &req->destroy_pd;

    rdma_rm_dealloc_pd(&dev->rdma_dev_res, cmd->pd_handle);

    return 0;
}

static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_mr *cmd = &req->create_mr;
    struct pvrdma_cmd_create_mr_resp *resp = &rsp->create_mr_resp;
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    void *host_virt = NULL;
    int rc = 0;

    memset(resp, 0, sizeof(*resp));

    if (!(cmd->flags & PVRDMA_MR_FLAG_DMA)) {
        host_virt = pvrdma_map_to_pdir(pci_dev, cmd->pdir_dma, cmd->nchunks,
                                       cmd->length);
        if (!host_virt) {
            rdma_error_report("Failed to map to pdir");
            return -EINVAL;
        }
    }

    rc = rdma_rm_alloc_mr(&dev->rdma_dev_res, cmd->pd_handle, cmd->start,
                          cmd->length, host_virt, cmd->access_flags,
                          &resp->mr_handle, &resp->lkey, &resp->rkey);
    if (rc && host_virt) {
        munmap(host_virt, cmd->length);
    }

    return rc;
}

static int destroy_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_mr *cmd = &req->destroy_mr;

    rdma_rm_dealloc_mr(&dev->rdma_dev_res, cmd->mr_handle);

    return 0;
}

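/*
 * A CQ ring spans nchunks guest pages: the first page holds the
 * pvrdma_ring state (the ring itself uses slot 1 of that page), the
 * remaining nchunks - 1 pages hold the CQEs.
 */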
static int create_cq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
                          uint64_t pdir_dma, uint32_t nchunks, uint32_t cqe)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *r;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];

    if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
        rdma_error_report("Got invalid nchunks: %d", nchunks);
        return rc;
    }

    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to CQ page directory");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to CQ page table");
        goto out;
    }

    r = g_malloc(sizeof(*r));
    *ring = r;

    r->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);

    if (!r->ring_state) {
        rdma_error_report("Failed to map to CQ ring state");
        goto out_free_ring;
    }

    sprintf(ring_name, "cq_ring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1],
                          cqe, sizeof(struct pvrdma_cqe),
                          /* first page is ring state */
                          (dma_addr_t *)&tbl[1], nchunks - 1);
    if (rc) {
        goto out_unmap_ring_state;
    }

    goto out;

out_unmap_ring_state:
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE);

out_free_ring:
    g_free(r);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_cq_ring(PvrdmaRing *ring)
{
    pvrdma_ring_free(ring);
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(ring->dev, --ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}

static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_cq *cmd = &req->create_cq;
    struct pvrdma_cmd_create_cq_resp *resp = &rsp->create_cq_resp;
    PvrdmaRing *ring = NULL;
    int rc;

    memset(resp, 0, sizeof(*resp));

    resp->cqe = cmd->cqe;

    rc = create_cq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma, cmd->nchunks,
                        resp->cqe);
    if (rc) {
        return rc;
    }

    rc = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev, cmd->cqe,
                          &resp->cq_handle, ring);
    if (rc) {
        destroy_cq_ring(ring);
    }

    resp->cqe = cmd->cqe;

    return rc;
}

static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_cq *cmd = &req->destroy_cq;
    RdmaRmCQ *cq;
    PvrdmaRing *ring;

    cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle);
    if (!cq) {
        rdma_error_report("Got invalid CQ handle");
        return -EINVAL;
    }

    ring = (PvrdmaRing *)cq->opaque;
    destroy_cq_ring(ring);

    rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle);

    return 0;
}

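/*
 * A QP uses one ring-state page followed by the send-ring pages and then
 * the receive-ring pages; when the QP is attached to an SRQ only the send
 * ring is created.
 */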
static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
                           PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge,
                           uint32_t spages, uint32_t rcqe, uint32_t rmax_sge,
                           uint32_t rpages, uint8_t is_srq)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *sr, *rr;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];
    uint32_t wqe_sz;

    if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES) {
        rdma_error_report("Got invalid send page count for QP ring: %d",
                          spages);
        return rc;
    }

    if (!is_srq && (!rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES)) {
        rdma_error_report("Got invalid recv page count for QP ring: %d",
                          rpages);
        return rc;
    }

    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to QP page directory");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to QP page table");
        goto out;
    }

    if (!is_srq) {
        sr = g_malloc(2 * sizeof(*rr));
        rr = &sr[1];
    } else {
        sr = g_malloc(sizeof(*sr));
    }

    *rings = sr;

    /* Create send ring */
    sr->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
    if (!sr->ring_state) {
        rdma_error_report("Failed to map to QP ring state");
        goto out_free_sr_mem;
    }

    wqe_sz = pow2ceil(sizeof(struct pvrdma_sq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * smax_sge - 1);

    sprintf(ring_name, "qp_sring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(sr, ring_name, pci_dev, sr->ring_state,
                          scqe, wqe_sz, (dma_addr_t *)&tbl[1], spages);
    if (rc) {
        goto out_unmap_ring_state;
    }

    if (!is_srq) {
        /* Create recv ring */
        rr->ring_state = &sr->ring_state[1];
        wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
                          sizeof(struct pvrdma_sge) * rmax_sge - 1);
        sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
        rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
                              rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages],
                              rpages);
        if (rc) {
            goto out_free_sr;
        }
    }

    goto out;

out_free_sr:
    pvrdma_ring_free(sr);

out_unmap_ring_state:
    rdma_pci_dma_unmap(pci_dev, sr->ring_state, TARGET_PAGE_SIZE);

out_free_sr_mem:
    g_free(sr);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_qp_rings(PvrdmaRing *ring, uint8_t is_srq)
{
    pvrdma_ring_free(&ring[0]);
    if (!is_srq) {
        pvrdma_ring_free(&ring[1]);
    }

    rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}

static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_qp *cmd = &req->create_qp;
    struct pvrdma_cmd_create_qp_resp *resp = &rsp->create_qp_resp;
    PvrdmaRing *rings = NULL;
    int rc;

    memset(resp, 0, sizeof(*resp));

    rc = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings,
                         cmd->max_send_wr, cmd->max_send_sge, cmd->send_chunks,
                         cmd->max_recv_wr, cmd->max_recv_sge,
                         cmd->total_chunks - cmd->send_chunks - 1, cmd->is_srq);
    if (rc) {
        return rc;
    }

    rc = rdma_rm_alloc_qp(&dev->rdma_dev_res, cmd->pd_handle, cmd->qp_type,
                          cmd->max_send_wr, cmd->max_send_sge,
                          cmd->send_cq_handle, cmd->max_recv_wr,
                          cmd->max_recv_sge, cmd->recv_cq_handle, rings,
                          &resp->qpn, cmd->is_srq, cmd->srq_handle);
    if (rc) {
        destroy_qp_rings(rings, cmd->is_srq);
        return rc;
    }

    resp->max_send_wr = cmd->max_send_wr;
    resp->max_recv_wr = cmd->max_recv_wr;
    resp->max_send_sge = cmd->max_send_sge;
    resp->max_recv_sge = cmd->max_recv_sge;
    resp->max_inline_data = cmd->max_inline_data;

    return 0;
}

static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp;
    int rc;

    /* No need to verify sgid_index since it is u8 */

    rc = rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev,
                           cmd->qp_handle, cmd->attr_mask,
                           cmd->attrs.ah_attr.grh.sgid_index,
                           (union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid,
                           cmd->attrs.dest_qp_num,
                           (enum ibv_qp_state)cmd->attrs.qp_state,
                           cmd->attrs.qkey, cmd->attrs.rq_psn,
                           cmd->attrs.sq_psn);

    return rc;
}

static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                    union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_qp *cmd = &req->query_qp;
    struct pvrdma_cmd_query_qp_resp *resp = &rsp->query_qp_resp;
    struct ibv_qp_init_attr init_attr;
    int rc;

    memset(resp, 0, sizeof(*resp));

    rc = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev, cmd->qp_handle,
                          (struct ibv_qp_attr *)&resp->attrs, cmd->attr_mask,
                          &init_attr);

    return rc;
}

static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_qp *cmd = &req->destroy_qp;
    RdmaRmQP *qp;
    PvrdmaRing *ring;

    qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle);
    if (!qp) {
        return -EINVAL;
    }

    ring = (PvrdmaRing *)qp->opaque;
    destroy_qp_rings(ring, qp->is_srq);
    rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);

    return 0;
}

static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
                       union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_bind *cmd = &req->create_bind;
    union ibv_gid *gid = (union ibv_gid *)&cmd->new_gid;
    int rc;

    if (cmd->index >= MAX_PORT_GIDS) {
        return -EINVAL;
    }

    rc = rdma_rm_add_gid(&dev->rdma_dev_res, &dev->backend_dev,
                         dev->backend_eth_device_name, gid, cmd->index);

    return rc;
}

static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
                        union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind;
    int rc;

    if (cmd->index >= MAX_PORT_GIDS) {
        return -EINVAL;
    }

    rc = rdma_rm_del_gid(&dev->rdma_dev_res, &dev->backend_dev,
                         dev->backend_eth_device_name, cmd->index);

    return rc;
}

static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_uc *cmd = &req->create_uc;
    struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp;
    int rc;

    memset(resp, 0, sizeof(*resp));
    rc = rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn, &resp->ctx_handle);

    return rc;
}

static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_uc *cmd = &req->destroy_uc;

    rdma_rm_dealloc_uc(&dev->rdma_dev_res, cmd->ctx_handle);

    return 0;
}

static int create_srq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
                           uint64_t pdir_dma, uint32_t max_wr,
                           uint32_t max_sge, uint32_t nchunks)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *r;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];
    uint32_t wqe_sz;

    if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
        rdma_error_report("Got invalid page count for SRQ ring: %d",
                          nchunks);
        return rc;
    }

    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to SRQ page directory");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to SRQ page table");
        goto out;
    }

    r = g_malloc(sizeof(*r));
    *ring = r;

    r->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
    if (!r->ring_state) {
        rdma_error_report("Failed to map to SRQ ring state");
        goto out_free_ring_mem;
    }

    wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * max_sge - 1);
    sprintf(ring_name, "srq_ring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1], max_wr,
                          wqe_sz, (dma_addr_t *)&tbl[1], nchunks - 1);
    if (rc) {
        goto out_unmap_ring_state;
    }

    goto out;

out_unmap_ring_state:
    rdma_pci_dma_unmap(pci_dev, r->ring_state, TARGET_PAGE_SIZE);

out_free_ring_mem:
    g_free(r);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_srq_ring(PvrdmaRing *ring)
{
    pvrdma_ring_free(ring);
    rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}

static int create_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_srq *cmd = &req->create_srq;
    struct pvrdma_cmd_create_srq_resp *resp = &rsp->create_srq_resp;
    PvrdmaRing *ring = NULL;
    int rc;

    memset(resp, 0, sizeof(*resp));

    rc = create_srq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma,
                         cmd->attrs.max_wr, cmd->attrs.max_sge,
                         cmd->nchunks);
    if (rc) {
        return rc;
    }

    rc = rdma_rm_alloc_srq(&dev->rdma_dev_res, cmd->pd_handle,
                           cmd->attrs.max_wr, cmd->attrs.max_sge,
                           cmd->attrs.srq_limit, &resp->srqn, ring);
    if (rc) {
        destroy_srq_ring(ring);
        return rc;
    }

    return 0;
}

static int query_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_srq *cmd = &req->query_srq;
    struct pvrdma_cmd_query_srq_resp *resp = &rsp->query_srq_resp;

    memset(resp, 0, sizeof(*resp));

    return rdma_rm_query_srq(&dev->rdma_dev_res, cmd->srq_handle,
                             (struct ibv_srq_attr *)&resp->attrs);
}

static int modify_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_modify_srq *cmd = &req->modify_srq;

    /* Only support SRQ limit */
    if (!(cmd->attr_mask & IBV_SRQ_LIMIT) ||
        (cmd->attr_mask & IBV_SRQ_MAX_WR)) {
        return -EINVAL;
    }

    return rdma_rm_modify_srq(&dev->rdma_dev_res, cmd->srq_handle,
                              (struct ibv_srq_attr *)&cmd->attrs,
                              cmd->attr_mask);
}

static int destroy_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                       union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_srq *cmd = &req->destroy_srq;
    RdmaRmSRQ *srq;
    PvrdmaRing *ring;

    srq = rdma_rm_get_srq(&dev->rdma_dev_res, cmd->srq_handle);
    if (!srq) {
        return -EINVAL;
    }

    ring = (PvrdmaRing *)srq->opaque;
    destroy_srq_ring(ring);
    rdma_rm_dealloc_srq(&dev->rdma_dev_res, cmd->srq_handle);

    return 0;
}

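/*
 * Dispatch table mapping each PVRDMA_CMD_* request to the ack code that is
 * written back to the guest and the handler that executes it.
 */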
struct cmd_handler {
    uint32_t cmd;
    uint32_t ack;
    int (*exec)(PVRDMADev *dev, union pvrdma_cmd_req *req,
                union pvrdma_cmd_resp *rsp);
};

static struct cmd_handler cmd_handlers[] = {
    {PVRDMA_CMD_QUERY_PORT, PVRDMA_CMD_QUERY_PORT_RESP, query_port},
    {PVRDMA_CMD_QUERY_PKEY, PVRDMA_CMD_QUERY_PKEY_RESP, query_pkey},
    {PVRDMA_CMD_CREATE_PD, PVRDMA_CMD_CREATE_PD_RESP, create_pd},
    {PVRDMA_CMD_DESTROY_PD, PVRDMA_CMD_DESTROY_PD_RESP_NOOP, destroy_pd},
    {PVRDMA_CMD_CREATE_MR, PVRDMA_CMD_CREATE_MR_RESP, create_mr},
    {PVRDMA_CMD_DESTROY_MR, PVRDMA_CMD_DESTROY_MR_RESP_NOOP, destroy_mr},
    {PVRDMA_CMD_CREATE_CQ, PVRDMA_CMD_CREATE_CQ_RESP, create_cq},
    {PVRDMA_CMD_RESIZE_CQ, PVRDMA_CMD_RESIZE_CQ_RESP, NULL},
    {PVRDMA_CMD_DESTROY_CQ, PVRDMA_CMD_DESTROY_CQ_RESP_NOOP, destroy_cq},
    {PVRDMA_CMD_CREATE_QP, PVRDMA_CMD_CREATE_QP_RESP, create_qp},
    {PVRDMA_CMD_MODIFY_QP, PVRDMA_CMD_MODIFY_QP_RESP, modify_qp},
    {PVRDMA_CMD_QUERY_QP, PVRDMA_CMD_QUERY_QP_RESP, query_qp},
    {PVRDMA_CMD_DESTROY_QP, PVRDMA_CMD_DESTROY_QP_RESP, destroy_qp},
    {PVRDMA_CMD_CREATE_UC, PVRDMA_CMD_CREATE_UC_RESP, create_uc},
    {PVRDMA_CMD_DESTROY_UC, PVRDMA_CMD_DESTROY_UC_RESP_NOOP, destroy_uc},
    {PVRDMA_CMD_CREATE_BIND, PVRDMA_CMD_CREATE_BIND_RESP_NOOP, create_bind},
    {PVRDMA_CMD_DESTROY_BIND, PVRDMA_CMD_DESTROY_BIND_RESP_NOOP, destroy_bind},
    {PVRDMA_CMD_CREATE_SRQ, PVRDMA_CMD_CREATE_SRQ_RESP, create_srq},
    {PVRDMA_CMD_QUERY_SRQ, PVRDMA_CMD_QUERY_SRQ_RESP, query_srq},
    {PVRDMA_CMD_MODIFY_SRQ, PVRDMA_CMD_MODIFY_SRQ_RESP, modify_srq},
    {PVRDMA_CMD_DESTROY_SRQ, PVRDMA_CMD_DESTROY_SRQ_RESP, destroy_srq},
};

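/*
 * Entry point for the command channel: invoked when the guest kicks the
 * command register. Validates the command index, runs the handler and
 * reports the result through the response slot, the error register and
 * the command-ring interrupt vector.
 */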
int pvrdma_exec_cmd(PVRDMADev *dev)
{
    int err = 0xFFFF;
    DSRInfo *dsr_info;

    dsr_info = &dev->dsr_info;

    if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) /
                      sizeof(struct cmd_handler)) {
        rdma_error_report("Unsupported command");
        goto out;
    }

    if (!cmd_handlers[dsr_info->req->hdr.cmd].exec) {
        rdma_error_report("Unsupported command (not implemented yet)");
        goto out;
    }

    err = cmd_handlers[dsr_info->req->hdr.cmd].exec(dev, dsr_info->req,
                                                    dsr_info->rsp);
    dsr_info->rsp->hdr.response = dsr_info->req->hdr.response;
    dsr_info->rsp->hdr.ack = cmd_handlers[dsr_info->req->hdr.cmd].ack;
    dsr_info->rsp->hdr.err = err < 0 ? -err : 0;

    trace_pvrdma_exec_cmd(dsr_info->req->hdr.cmd, dsr_info->rsp->hdr.err);

    dev->stats.commands++;

out:
    set_reg_val(dev, PVRDMA_REG_ERR, err);
    post_interrupt(dev, INTR_VEC_CMD_RING);

    return (err == 0) ? 0 : -EINVAL;
}