/*
 * QEMU paravirtual RDMA - Command channel
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_ids.h"

#include "../rdma_backend.h"
#include "../rdma_rm.h"
#include "../rdma_utils.h"

#include "trace.h"
#include "pvrdma.h"
#include "standard-headers/rdma/vmw_pvrdma-abi.h"
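
/*
 * Map a guest multi-page buffer, described by the pvrdma page directory at
 * pdir_dma, into one contiguous host virtual area.  The first guest page is
 * remapped with MREMAP_MAYMOVE to obtain an area of 'length' bytes, and each
 * following page is remapped with MREMAP_FIXED into its offset within that
 * area.  Returns the base of the mapping, or NULL on failure.
 */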
static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma,
                                uint32_t nchunks, size_t length)
{
    uint64_t *dir, *tbl;
    int tbl_idx, dir_idx, addr_idx;
    void *host_virt = NULL, *curr_page;

    if (!nchunks) {
        rdma_error_report("Got nchunks=0");
        return NULL;
    }

    dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to page directory");
        return NULL;
    }

    tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to page table 0");
        goto out_unmap_dir;
    }

    curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE);
    if (!curr_page) {
        rdma_error_report("Failed to map the page 0");
        goto out_unmap_tbl;
    }

    host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE);
    if (host_virt == MAP_FAILED) {
        host_virt = NULL;
        rdma_error_report("Failed to remap memory for host_virt");
        goto out_unmap_tbl;
    }
    trace_pvrdma_map_to_pdir_host_virt(curr_page, host_virt);

    rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

    dir_idx = 0;
    tbl_idx = 1;
    addr_idx = 1;
    while (addr_idx < nchunks) {
        if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) {
            tbl_idx = 0;
            dir_idx++;
            rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
            tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE);
            if (!tbl) {
                rdma_error_report("Failed to map to page table %d", dir_idx);
                goto out_unmap_host_virt;
            }
        }

        curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx],
                                     TARGET_PAGE_SIZE);
        if (!curr_page) {
            rdma_error_report("Failed to map to page %d, dir %d", tbl_idx,
                              dir_idx);
            goto out_unmap_host_virt;
        }

        mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED,
               host_virt + TARGET_PAGE_SIZE * addr_idx);

        trace_pvrdma_map_to_pdir_next_page(addr_idx, curr_page, host_virt +
                                           TARGET_PAGE_SIZE * addr_idx);

        rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);

        addr_idx++;

        tbl_idx++;
    }

    goto out_unmap_tbl;

out_unmap_host_virt:
    munmap(host_virt, length);
    host_virt = NULL;

out_unmap_tbl:
    rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);

out_unmap_dir:
    rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE);

    return host_virt;
}
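
/*
 * PVRDMA_CMD_QUERY_PORT: report port attributes.  The port state comes from
 * the backend unless func0 is inactive, in which case PVRDMA_PORT_DOWN is
 * reported; table lengths are clamped to the device limits and the remaining
 * attributes are fixed values.
 */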
static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_port *cmd = &req->query_port;
    struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp;
    struct pvrdma_port_attr attrs = {};

    if (cmd->port_num > MAX_PORTS) {
        return -EINVAL;
    }

    if (rdma_backend_query_port(&dev->backend_dev,
                                (struct ibv_port_attr *)&attrs)) {
        return -ENOMEM;
    }

    memset(resp, 0, sizeof(*resp));

    resp->attrs.state = dev->func0->device_active ? attrs.state :
                                                    PVRDMA_PORT_DOWN;
    resp->attrs.max_mtu = attrs.max_mtu;
    resp->attrs.active_mtu = attrs.active_mtu;
    resp->attrs.phys_state = attrs.phys_state;
    resp->attrs.gid_tbl_len = MIN(MAX_PORT_GIDS, attrs.gid_tbl_len);
    resp->attrs.max_msg_sz = 1024;
    resp->attrs.pkey_tbl_len = MIN(MAX_PORT_PKEYS, attrs.pkey_tbl_len);
    resp->attrs.active_width = 1;
    resp->attrs.active_speed = 1;

    return 0;
}
static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_pkey *cmd = &req->query_pkey;
    struct pvrdma_cmd_query_pkey_resp *resp = &rsp->query_pkey_resp;

    if (cmd->port_num > MAX_PORTS) {
        return -EINVAL;
    }

    if (cmd->index > MAX_PKEYS) {
        return -EINVAL;
    }

    memset(resp, 0, sizeof(*resp));

    resp->pkey = PVRDMA_PKEY;

    return 0;
}
static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_pd *cmd = &req->create_pd;
    struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp;
    int rc;

    memset(resp, 0, sizeof(*resp));
    rc = rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev,
                          &resp->pd_handle, cmd->ctx_handle);

    return rc;
}

static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_pd *cmd = &req->destroy_pd;

    rdma_rm_dealloc_pd(&dev->rdma_dev_res, cmd->pd_handle);

    return 0;
}
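
/*
 * PVRDMA_CMD_CREATE_MR: for non-DMA MRs the guest pages are first stitched
 * into a contiguous host mapping via pvrdma_map_to_pdir(); the mapping is
 * undone with munmap() if the resource-manager allocation fails.
 */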
static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_mr *cmd = &req->create_mr;
    struct pvrdma_cmd_create_mr_resp *resp = &rsp->create_mr_resp;
    PCIDevice *pci_dev = PCI_DEVICE(dev);
    void *host_virt = NULL;
    int rc = 0;

    memset(resp, 0, sizeof(*resp));

    if (!(cmd->flags & PVRDMA_MR_FLAG_DMA)) {
        host_virt = pvrdma_map_to_pdir(pci_dev, cmd->pdir_dma, cmd->nchunks,
                                       cmd->length);
        if (!host_virt) {
            rdma_error_report("Failed to map to pdir");
            return -EINVAL;
        }
    }

    rc = rdma_rm_alloc_mr(&dev->rdma_dev_res, cmd->pd_handle, cmd->start,
                          cmd->length, host_virt, cmd->access_flags,
                          &resp->mr_handle, &resp->lkey, &resp->rkey);
    if (rc && host_virt) {
        munmap(host_virt, cmd->length);
    }

    return rc;
}

static int destroy_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_mr *cmd = &req->destroy_mr;

    rdma_rm_dealloc_mr(&dev->rdma_dev_res, cmd->mr_handle);

    return 0;
}
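
/*
 * Build the completion-queue ring from the guest page directory: page 0 of
 * the first page table holds the ring state, and the remaining nchunks - 1
 * pages back the CQEs handed to pvrdma_ring_init().
 */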
static int create_cq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
                          uint64_t pdir_dma, uint32_t nchunks, uint32_t cqe)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *r;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];

    if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
        rdma_error_report("Got invalid nchunks: %d", nchunks);
        return rc;
    }

    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to CQ page directory");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to CQ page table");
        goto out;
    }

    r = g_malloc(sizeof(*r));
    *ring = r;

    r->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);

    if (!r->ring_state) {
        rdma_error_report("Failed to map to CQ ring state");
        goto out_free_ring;
    }

    sprintf(ring_name, "cq_ring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1],
                          cqe, sizeof(struct pvrdma_cqe),
                          /* first page is ring state */
                          (dma_addr_t *)&tbl[1], nchunks - 1);
    if (rc) {
        goto out_unmap_ring_state;
    }

    goto out;

out_unmap_ring_state:
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE);

out_free_ring:
    g_free(r);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_cq_ring(PvrdmaRing *ring)
{
    pvrdma_ring_free(ring);
    /* ring_state was in slot 1, not 0 so need to jump back */
    rdma_pci_dma_unmap(ring->dev, --ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}
static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_cq *cmd = &req->create_cq;
    struct pvrdma_cmd_create_cq_resp *resp = &rsp->create_cq_resp;
    PvrdmaRing *ring = NULL;
    int rc;

    memset(resp, 0, sizeof(*resp));

    resp->cqe = cmd->cqe;

    rc = create_cq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma, cmd->nchunks,
                        cmd->cqe);
    if (rc) {
        return rc;
    }

    rc = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev, cmd->cqe,
                          &resp->cq_handle, ring);
    if (rc) {
        destroy_cq_ring(ring);
    }

    resp->cqe = cmd->cqe;

    return rc;
}

static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_cq *cmd = &req->destroy_cq;
    RdmaRmCQ *cq;
    PvrdmaRing *ring;

    cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle);
    if (!cq) {
        rdma_error_report("Got invalid CQ handle");
        return -EINVAL;
    }

    ring = (PvrdmaRing *)cq->opaque;
    destroy_cq_ring(ring);

    rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle);

    return 0;
}
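
/*
 * Build the send ring and, unless the QP uses an SRQ, the receive ring for a
 * queue pair.  Both rings share the ring-state page (tbl[0]); the send ring
 * uses 'spages' pages starting at tbl[1] and the receive ring uses 'rpages'
 * pages starting at tbl[1 + spages].
 */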
static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
                           PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge,
                           uint32_t spages, uint32_t rcqe, uint32_t rmax_sge,
                           uint32_t rpages, uint8_t is_srq)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *sr, *rr;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];
    uint32_t wqe_sz;

    if (!spages || spages > PVRDMA_MAX_FAST_REG_PAGES) {
        rdma_error_report("Got invalid send page count for QP ring: %d",
                          spages);
        return rc;
    }

    if (!is_srq && (!rpages || rpages > PVRDMA_MAX_FAST_REG_PAGES)) {
        rdma_error_report("Got invalid recv page count for QP ring: %d",
                          rpages);
        return rc;
    }

    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to QP page directory");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to QP page table");
        goto out;
    }

    if (!is_srq) {
        sr = g_malloc(2 * sizeof(*rr));
        rr = &sr[1];
    } else {
        sr = g_malloc(sizeof(*sr));
    }

    *rings = sr;

    /* Create send ring */
    sr->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
    if (!sr->ring_state) {
        rdma_error_report("Failed to map to QP ring state");
        goto out_free_sr_mem;
    }

    wqe_sz = pow2ceil(sizeof(struct pvrdma_sq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * smax_sge - 1);

    sprintf(ring_name, "qp_sring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(sr, ring_name, pci_dev, sr->ring_state,
                          scqe, wqe_sz, (dma_addr_t *)&tbl[1], spages);
    if (rc) {
        goto out_unmap_ring_state;
    }

    if (!is_srq) {
        /* Create recv ring */
        rr->ring_state = &sr->ring_state[1];
        wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
                          sizeof(struct pvrdma_sge) * rmax_sge - 1);
        sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
        rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
                              rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages],
                              rpages);
        if (rc) {
            goto out_free_sr;
        }
    }

    goto out;

out_free_sr:
    pvrdma_ring_free(sr);

out_unmap_ring_state:
    rdma_pci_dma_unmap(pci_dev, sr->ring_state, TARGET_PAGE_SIZE);

out_free_sr_mem:
    g_free(sr);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_qp_rings(PvrdmaRing *ring, uint8_t is_srq)
{
    pvrdma_ring_free(&ring[0]);
    if (!is_srq) {
        pvrdma_ring_free(&ring[1]);
    }

    rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}
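
/*
 * PVRDMA_CMD_CREATE_QP: set up the QP rings, then register the QP with the
 * resource manager; the rings are torn down again if registration fails.
 */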
static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_qp *cmd = &req->create_qp;
    struct pvrdma_cmd_create_qp_resp *resp = &rsp->create_qp_resp;
    PvrdmaRing *rings = NULL;
    int rc;

    memset(resp, 0, sizeof(*resp));

    rc = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings,
                         cmd->max_send_wr, cmd->max_send_sge, cmd->send_chunks,
                         cmd->max_recv_wr, cmd->max_recv_sge,
                         cmd->total_chunks - cmd->send_chunks - 1, cmd->is_srq);
    if (rc) {
        return rc;
    }

    rc = rdma_rm_alloc_qp(&dev->rdma_dev_res, cmd->pd_handle, cmd->qp_type,
                          cmd->max_send_wr, cmd->max_send_sge,
                          cmd->send_cq_handle, cmd->max_recv_wr,
                          cmd->max_recv_sge, cmd->recv_cq_handle, rings,
                          &resp->qpn, cmd->is_srq, cmd->srq_handle);
    if (rc) {
        destroy_qp_rings(rings, cmd->is_srq);
        return rc;
    }

    resp->max_send_wr = cmd->max_send_wr;
    resp->max_recv_wr = cmd->max_recv_wr;
    resp->max_send_sge = cmd->max_send_sge;
    resp->max_recv_sge = cmd->max_recv_sge;
    resp->max_inline_data = cmd->max_inline_data;

    return 0;
}

static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp;
    int rc;

    /* No need to verify sgid_index since it is u8 */

    rc = rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev,
                           cmd->qp_handle, cmd->attr_mask,
                           cmd->attrs.ah_attr.grh.sgid_index,
                           (union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid,
                           cmd->attrs.dest_qp_num,
                           (enum ibv_qp_state)cmd->attrs.qp_state,
                           cmd->attrs.qkey, cmd->attrs.rq_psn,
                           cmd->attrs.sq_psn);

    return rc;
}

static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                    union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_qp *cmd = &req->query_qp;
    struct pvrdma_cmd_query_qp_resp *resp = &rsp->query_qp_resp;
    struct ibv_qp_init_attr init_attr;
    int rc;

    memset(resp, 0, sizeof(*resp));

    rc = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev, cmd->qp_handle,
                          (struct ibv_qp_attr *)&resp->attrs, cmd->attr_mask,
                          &init_attr);

    return rc;
}

static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_qp *cmd = &req->destroy_qp;
    RdmaRmQP *qp;
    PvrdmaRing *ring;

    qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle);
    if (!qp) {
        return -EINVAL;
    }

    ring = (PvrdmaRing *)qp->opaque;
    destroy_qp_rings(ring, qp->is_srq);
    rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);

    return 0;
}
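
/*
 * PVRDMA_CMD_CREATE_BIND / PVRDMA_CMD_DESTROY_BIND: add or remove a GID
 * table entry on the backend Ethernet device, after bounds-checking the
 * requested index against MAX_PORT_GIDS.
 */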
static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
                       union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_bind *cmd = &req->create_bind;
    int rc;
    union ibv_gid *gid = (union ibv_gid *)&cmd->new_gid;

    if (cmd->index >= MAX_PORT_GIDS) {
        return -EINVAL;
    }

    rc = rdma_rm_add_gid(&dev->rdma_dev_res, &dev->backend_dev,
                         dev->backend_eth_device_name, gid, cmd->index);

    return rc;
}

static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
                        union pvrdma_cmd_resp *rsp)
{
    int rc;

    struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind;

    if (cmd->index >= MAX_PORT_GIDS) {
        return -EINVAL;
    }

    rc = rdma_rm_del_gid(&dev->rdma_dev_res, &dev->backend_dev,
                         dev->backend_eth_device_name, cmd->index);

    return rc;
}

static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_uc *cmd = &req->create_uc;
    struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp;
    int rc;

    memset(resp, 0, sizeof(*resp));
    rc = rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn, &resp->ctx_handle);

    return rc;
}

static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_uc *cmd = &req->destroy_uc;

    rdma_rm_dealloc_uc(&dev->rdma_dev_res, cmd->ctx_handle);

    return 0;
}
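
/*
 * Build the shared receive queue ring: as with the CQ ring, tbl[0] is the
 * ring-state page and the remaining nchunks - 1 pages back the WQEs.
 */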
static int create_srq_ring(PCIDevice *pci_dev, PvrdmaRing **ring,
                           uint64_t pdir_dma, uint32_t max_wr,
                           uint32_t max_sge, uint32_t nchunks)
{
    uint64_t *dir = NULL, *tbl = NULL;
    PvrdmaRing *r;
    int rc = -EINVAL;
    char ring_name[MAX_RING_NAME_SZ];
    uint32_t wqe_sz;

    if (!nchunks || nchunks > PVRDMA_MAX_FAST_REG_PAGES) {
        rdma_error_report("Got invalid page count for SRQ ring: %d",
                          nchunks);
        return rc;
    }

    dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
    if (!dir) {
        rdma_error_report("Failed to map to SRQ page directory");
        goto out;
    }

    tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
    if (!tbl) {
        rdma_error_report("Failed to map to SRQ page table");
        goto out;
    }

    r = g_malloc(sizeof(*r));
    *ring = r;

    r->ring_state = (struct pvrdma_ring *)
        rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
    if (!r->ring_state) {
        rdma_error_report("Failed to map to SRQ ring state");
        goto out_free_ring_mem;
    }

    wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
                      sizeof(struct pvrdma_sge) * max_sge - 1);
    sprintf(ring_name, "srq_ring_%" PRIx64, pdir_dma);
    rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1], max_wr,
                          wqe_sz, (dma_addr_t *)&tbl[1], nchunks - 1);
    if (rc) {
        goto out_unmap_ring_state;
    }

    goto out;

out_unmap_ring_state:
    rdma_pci_dma_unmap(pci_dev, r->ring_state, TARGET_PAGE_SIZE);

out_free_ring_mem:
    g_free(r);

out:
    rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
    rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);

    return rc;
}

static void destroy_srq_ring(PvrdmaRing *ring)
{
    pvrdma_ring_free(ring);
    rdma_pci_dma_unmap(ring->dev, ring->ring_state, TARGET_PAGE_SIZE);
    g_free(ring);
}
static int create_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_create_srq *cmd = &req->create_srq;
    struct pvrdma_cmd_create_srq_resp *resp = &rsp->create_srq_resp;
    PvrdmaRing *ring = NULL;
    int rc;

    memset(resp, 0, sizeof(*resp));

    rc = create_srq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma,
                         cmd->attrs.max_wr, cmd->attrs.max_sge,
                         cmd->nchunks);
    if (rc) {
        return rc;
    }

    rc = rdma_rm_alloc_srq(&dev->rdma_dev_res, cmd->pd_handle,
                           cmd->attrs.max_wr, cmd->attrs.max_sge,
                           cmd->attrs.srq_limit, &resp->srqn, ring);
    if (rc) {
        destroy_srq_ring(ring);
        return rc;
    }

    return 0;
}

static int query_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                     union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_query_srq *cmd = &req->query_srq;
    struct pvrdma_cmd_query_srq_resp *resp = &rsp->query_srq_resp;

    memset(resp, 0, sizeof(*resp));

    return rdma_rm_query_srq(&dev->rdma_dev_res, cmd->srq_handle,
                             (struct ibv_srq_attr *)&resp->attrs);
}

static int modify_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                      union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_modify_srq *cmd = &req->modify_srq;

    /* Only support SRQ limit */
    if (!(cmd->attr_mask & IBV_SRQ_LIMIT) ||
        (cmd->attr_mask & IBV_SRQ_MAX_WR)) {
        return -EINVAL;
    }

    return rdma_rm_modify_srq(&dev->rdma_dev_res, cmd->srq_handle,
                              (struct ibv_srq_attr *)&cmd->attrs,
                              cmd->attr_mask);
}

static int destroy_srq(PVRDMADev *dev, union pvrdma_cmd_req *req,
                       union pvrdma_cmd_resp *rsp)
{
    struct pvrdma_cmd_destroy_srq *cmd = &req->destroy_srq;
    RdmaRmSRQ *srq;
    PvrdmaRing *ring;

    srq = rdma_rm_get_srq(&dev->rdma_dev_res, cmd->srq_handle);
    if (!srq) {
        return -EINVAL;
    }

    ring = (PvrdmaRing *)srq->opaque;
    destroy_srq_ring(ring);
    rdma_rm_dealloc_srq(&dev->rdma_dev_res, cmd->srq_handle);

    return 0;
}
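
/*
 * Dispatch table: maps each request opcode to the ack code written back in
 * the response header and to its handler.  A NULL handler (RESIZE_CQ) means
 * the command is not implemented.
 */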
struct cmd_handler {
    uint32_t cmd;
    uint32_t ack;
    int (*exec)(PVRDMADev *dev, union pvrdma_cmd_req *req,
                union pvrdma_cmd_resp *rsp);
};

static struct cmd_handler cmd_handlers[] = {
    {PVRDMA_CMD_QUERY_PORT, PVRDMA_CMD_QUERY_PORT_RESP, query_port},
    {PVRDMA_CMD_QUERY_PKEY, PVRDMA_CMD_QUERY_PKEY_RESP, query_pkey},
    {PVRDMA_CMD_CREATE_PD, PVRDMA_CMD_CREATE_PD_RESP, create_pd},
    {PVRDMA_CMD_DESTROY_PD, PVRDMA_CMD_DESTROY_PD_RESP_NOOP, destroy_pd},
    {PVRDMA_CMD_CREATE_MR, PVRDMA_CMD_CREATE_MR_RESP, create_mr},
    {PVRDMA_CMD_DESTROY_MR, PVRDMA_CMD_DESTROY_MR_RESP_NOOP, destroy_mr},
    {PVRDMA_CMD_CREATE_CQ, PVRDMA_CMD_CREATE_CQ_RESP, create_cq},
    {PVRDMA_CMD_RESIZE_CQ, PVRDMA_CMD_RESIZE_CQ_RESP, NULL},
    {PVRDMA_CMD_DESTROY_CQ, PVRDMA_CMD_DESTROY_CQ_RESP_NOOP, destroy_cq},
    {PVRDMA_CMD_CREATE_QP, PVRDMA_CMD_CREATE_QP_RESP, create_qp},
    {PVRDMA_CMD_MODIFY_QP, PVRDMA_CMD_MODIFY_QP_RESP, modify_qp},
    {PVRDMA_CMD_QUERY_QP, PVRDMA_CMD_QUERY_QP_RESP, query_qp},
    {PVRDMA_CMD_DESTROY_QP, PVRDMA_CMD_DESTROY_QP_RESP, destroy_qp},
    {PVRDMA_CMD_CREATE_UC, PVRDMA_CMD_CREATE_UC_RESP, create_uc},
    {PVRDMA_CMD_DESTROY_UC, PVRDMA_CMD_DESTROY_UC_RESP_NOOP, destroy_uc},
    {PVRDMA_CMD_CREATE_BIND, PVRDMA_CMD_CREATE_BIND_RESP_NOOP, create_bind},
    {PVRDMA_CMD_DESTROY_BIND, PVRDMA_CMD_DESTROY_BIND_RESP_NOOP, destroy_bind},
    {PVRDMA_CMD_CREATE_SRQ, PVRDMA_CMD_CREATE_SRQ_RESP, create_srq},
    {PVRDMA_CMD_QUERY_SRQ, PVRDMA_CMD_QUERY_SRQ_RESP, query_srq},
    {PVRDMA_CMD_MODIFY_SRQ, PVRDMA_CMD_MODIFY_SRQ_RESP, modify_srq},
    {PVRDMA_CMD_DESTROY_SRQ, PVRDMA_CMD_DESTROY_SRQ_RESP, destroy_srq},
};
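
/*
 * Execute the command currently posted through the DSR: validate the opcode,
 * run the handler, fill in the response header, write the status to
 * PVRDMA_REG_ERR and raise the command-ring interrupt.
 */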
int pvrdma_exec_cmd(PVRDMADev *dev)
{
    int err = 0xFFFF;
    DSRInfo *dsr_info;

    dsr_info = &dev->dsr_info;

    if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) /
                                  sizeof(struct cmd_handler)) {
        rdma_error_report("Unsupported command");
        goto out;
    }

    if (!cmd_handlers[dsr_info->req->hdr.cmd].exec) {
        rdma_error_report("Unsupported command (not implemented yet)");
        goto out;
    }

    err = cmd_handlers[dsr_info->req->hdr.cmd].exec(dev, dsr_info->req,
                                                    dsr_info->rsp);
    dsr_info->rsp->hdr.response = dsr_info->req->hdr.response;
    dsr_info->rsp->hdr.ack = cmd_handlers[dsr_info->req->hdr.cmd].ack;
    dsr_info->rsp->hdr.err = err < 0 ? -err : 0;

    trace_pvrdma_exec_cmd(dsr_info->req->hdr.cmd, dsr_info->rsp->hdr.err);

    dev->stats.commands++;

out:
    set_reg_val(dev, PVRDMA_REG_ERR, err);
    post_interrupt(dev, INTR_VEC_CMD_RING);

    return (err == 0) ? 0 : -EINVAL;
}