/* hw/rdma/vmw/pvrdma_cmd.c */
/*
 * QEMU paravirtual RDMA - Command channel
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "cpu.h"
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_ids.h"

#include "../rdma_backend.h"
#include "../rdma_rm.h"
#include "../rdma_utils.h"

#include "pvrdma.h"
#include "standard-headers/rdma/vmw_pvrdma-abi.h"
30 static void *pvrdma_map_to_pdir(PCIDevice *pdev, uint64_t pdir_dma,
31 uint32_t nchunks, size_t length)
33 uint64_t *dir, *tbl;
34 int tbl_idx, dir_idx, addr_idx;
35 void *host_virt = NULL, *curr_page;
37 if (!nchunks) {
38 pr_dbg("nchunks=0\n");
39 return NULL;
42 dir = rdma_pci_dma_map(pdev, pdir_dma, TARGET_PAGE_SIZE);
43 if (!dir) {
44 error_report("PVRDMA: Failed to map to page directory");
45 return NULL;
48 tbl = rdma_pci_dma_map(pdev, dir[0], TARGET_PAGE_SIZE);
49 if (!tbl) {
50 error_report("PVRDMA: Failed to map to page table 0");
51 goto out_unmap_dir;
54 curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[0], TARGET_PAGE_SIZE);
55 if (!curr_page) {
56 error_report("PVRDMA: Failed to map the first page");
57 goto out_unmap_tbl;
60 host_virt = mremap(curr_page, 0, length, MREMAP_MAYMOVE);
61 pr_dbg("mremap %p -> %p\n", curr_page, host_virt);
62 if (host_virt == MAP_FAILED) {
63 host_virt = NULL;
64 error_report("PVRDMA: Failed to remap memory for host_virt");
65 goto out_unmap_tbl;
68 rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);
70 pr_dbg("host_virt=%p\n", host_virt);
72 dir_idx = 0;
73 tbl_idx = 1;
74 addr_idx = 1;
75 while (addr_idx < nchunks) {
76 if (tbl_idx == TARGET_PAGE_SIZE / sizeof(uint64_t)) {
77 tbl_idx = 0;
78 dir_idx++;
79 pr_dbg("Mapping to table %d\n", dir_idx);
80 rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
81 tbl = rdma_pci_dma_map(pdev, dir[dir_idx], TARGET_PAGE_SIZE);
82 if (!tbl) {
83 error_report("PVRDMA: Failed to map to page table %d", dir_idx);
84 goto out_unmap_host_virt;
88 pr_dbg("guest_dma[%d]=0x%" PRIx64 "\n", addr_idx, tbl[tbl_idx]);
90 curr_page = rdma_pci_dma_map(pdev, (dma_addr_t)tbl[tbl_idx],
91 TARGET_PAGE_SIZE);
92 if (!curr_page) {
93 error_report("PVRDMA: Failed to map to page %d, dir %d", tbl_idx,
94 dir_idx);
95 goto out_unmap_host_virt;
98 mremap(curr_page, 0, TARGET_PAGE_SIZE, MREMAP_MAYMOVE | MREMAP_FIXED,
99 host_virt + TARGET_PAGE_SIZE * addr_idx);
101 rdma_pci_dma_unmap(pdev, curr_page, TARGET_PAGE_SIZE);
103 addr_idx++;
105 tbl_idx++;
108 goto out_unmap_tbl;
110 out_unmap_host_virt:
111 munmap(host_virt, length);
112 host_virt = NULL;
114 out_unmap_tbl:
115 rdma_pci_dma_unmap(pdev, tbl, TARGET_PAGE_SIZE);
117 out_unmap_dir:
118 rdma_pci_dma_unmap(pdev, dir, TARGET_PAGE_SIZE);
120 return host_virt;
123 static int query_port(PVRDMADev *dev, union pvrdma_cmd_req *req,
124 union pvrdma_cmd_resp *rsp)
126 struct pvrdma_cmd_query_port *cmd = &req->query_port;
127 struct pvrdma_cmd_query_port_resp *resp = &rsp->query_port_resp;
128 struct pvrdma_port_attr attrs = {0};
130 pr_dbg("port=%d\n", cmd->port_num);
132 if (rdma_backend_query_port(&dev->backend_dev,
133 (struct ibv_port_attr *)&attrs)) {
134 return -ENOMEM;
137 memset(resp, 0, sizeof(*resp));
138 resp->hdr.response = cmd->hdr.response;
139 resp->hdr.ack = PVRDMA_CMD_QUERY_PORT_RESP;
140 resp->hdr.err = 0;
142 resp->attrs.state = attrs.state;
143 resp->attrs.max_mtu = attrs.max_mtu;
144 resp->attrs.active_mtu = attrs.active_mtu;
145 resp->attrs.phys_state = attrs.phys_state;
146 resp->attrs.gid_tbl_len = MIN(MAX_PORT_GIDS, attrs.gid_tbl_len);
147 resp->attrs.max_msg_sz = 1024;
148 resp->attrs.pkey_tbl_len = MIN(MAX_PORT_PKEYS, attrs.pkey_tbl_len);
149 resp->attrs.active_width = 1;
150 resp->attrs.active_speed = 1;
152 return 0;
155 static int query_pkey(PVRDMADev *dev, union pvrdma_cmd_req *req,
156 union pvrdma_cmd_resp *rsp)
158 struct pvrdma_cmd_query_pkey *cmd = &req->query_pkey;
159 struct pvrdma_cmd_query_pkey_resp *resp = &rsp->query_pkey_resp;
161 pr_dbg("port=%d\n", cmd->port_num);
162 pr_dbg("index=%d\n", cmd->index);
164 memset(resp, 0, sizeof(*resp));
165 resp->hdr.response = cmd->hdr.response;
166 resp->hdr.ack = PVRDMA_CMD_QUERY_PKEY_RESP;
167 resp->hdr.err = 0;
169 resp->pkey = PVRDMA_PKEY;
170 pr_dbg("pkey=0x%x\n", resp->pkey);
172 return 0;
175 static int create_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
176 union pvrdma_cmd_resp *rsp)
178 struct pvrdma_cmd_create_pd *cmd = &req->create_pd;
179 struct pvrdma_cmd_create_pd_resp *resp = &rsp->create_pd_resp;
181 pr_dbg("context=0x%x\n", cmd->ctx_handle ? cmd->ctx_handle : 0);
183 memset(resp, 0, sizeof(*resp));
184 resp->hdr.response = cmd->hdr.response;
185 resp->hdr.ack = PVRDMA_CMD_CREATE_PD_RESP;
186 resp->hdr.err = rdma_rm_alloc_pd(&dev->rdma_dev_res, &dev->backend_dev,
187 &resp->pd_handle, cmd->ctx_handle);
189 pr_dbg("ret=%d\n", resp->hdr.err);
190 return resp->hdr.err;
193 static int destroy_pd(PVRDMADev *dev, union pvrdma_cmd_req *req,
194 union pvrdma_cmd_resp *rsp)
196 struct pvrdma_cmd_destroy_pd *cmd = &req->destroy_pd;
198 pr_dbg("pd_handle=%d\n", cmd->pd_handle);
200 rdma_rm_dealloc_pd(&dev->rdma_dev_res, cmd->pd_handle);
202 return 0;
205 static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
206 union pvrdma_cmd_resp *rsp)
208 struct pvrdma_cmd_create_mr *cmd = &req->create_mr;
209 struct pvrdma_cmd_create_mr_resp *resp = &rsp->create_mr_resp;
210 PCIDevice *pci_dev = PCI_DEVICE(dev);
211 void *host_virt = NULL;
213 memset(resp, 0, sizeof(*resp));
214 resp->hdr.response = cmd->hdr.response;
215 resp->hdr.ack = PVRDMA_CMD_CREATE_MR_RESP;
217 pr_dbg("pd_handle=%d\n", cmd->pd_handle);
218 pr_dbg("access_flags=0x%x\n", cmd->access_flags);
219 pr_dbg("flags=0x%x\n", cmd->flags);
221 if (!(cmd->flags & PVRDMA_MR_FLAG_DMA)) {
222 host_virt = pvrdma_map_to_pdir(pci_dev, cmd->pdir_dma, cmd->nchunks,
223 cmd->length);
224 if (!host_virt) {
225 pr_dbg("Failed to map to pdir\n");
226 resp->hdr.err = -EINVAL;
227 goto out;
231 resp->hdr.err = rdma_rm_alloc_mr(&dev->rdma_dev_res, cmd->pd_handle,
232 cmd->start, cmd->length, host_virt,
233 cmd->access_flags, &resp->mr_handle,
234 &resp->lkey, &resp->rkey);
235 if (resp->hdr.err && host_virt) {
236 munmap(host_virt, cmd->length);
239 out:
240 pr_dbg("ret=%d\n", resp->hdr.err);
241 return resp->hdr.err;
244 static int destroy_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
245 union pvrdma_cmd_resp *rsp)
247 struct pvrdma_cmd_destroy_mr *cmd = &req->destroy_mr;
249 pr_dbg("mr_handle=%d\n", cmd->mr_handle);
251 rdma_rm_dealloc_mr(&dev->rdma_dev_res, cmd->mr_handle);
253 return 0;
256 static int create_cq_ring(PCIDevice *pci_dev , PvrdmaRing **ring,
257 uint64_t pdir_dma, uint32_t nchunks, uint32_t cqe)
259 uint64_t *dir = NULL, *tbl = NULL;
260 PvrdmaRing *r;
261 int rc = -EINVAL;
262 char ring_name[MAX_RING_NAME_SZ];
264 pr_dbg("pdir_dma=0x%llx\n", (long long unsigned int)pdir_dma);
265 dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
266 if (!dir) {
267 pr_dbg("Failed to map to CQ page directory\n");
268 goto out;
271 tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
272 if (!tbl) {
273 pr_dbg("Failed to map to CQ page table\n");
274 goto out;
277 r = g_malloc(sizeof(*r));
278 *ring = r;
280 r->ring_state = (struct pvrdma_ring *)
281 rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
283 if (!r->ring_state) {
284 pr_dbg("Failed to map to CQ ring state\n");
285 goto out_free_ring;
288 sprintf(ring_name, "cq_ring_%" PRIx64, pdir_dma);
289 rc = pvrdma_ring_init(r, ring_name, pci_dev, &r->ring_state[1],
290 cqe, sizeof(struct pvrdma_cqe),
291 /* first page is ring state */
292 (dma_addr_t *)&tbl[1], nchunks - 1);
293 if (rc) {
294 goto out_unmap_ring_state;
297 goto out;
299 out_unmap_ring_state:
300 /* ring_state was in slot 1, not 0 so need to jump back */
301 rdma_pci_dma_unmap(pci_dev, --r->ring_state, TARGET_PAGE_SIZE);
303 out_free_ring:
304 g_free(r);
306 out:
307 rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
308 rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);
310 return rc;
313 static int create_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
314 union pvrdma_cmd_resp *rsp)
316 struct pvrdma_cmd_create_cq *cmd = &req->create_cq;
317 struct pvrdma_cmd_create_cq_resp *resp = &rsp->create_cq_resp;
318 PvrdmaRing *ring = NULL;
320 memset(resp, 0, sizeof(*resp));
321 resp->hdr.response = cmd->hdr.response;
322 resp->hdr.ack = PVRDMA_CMD_CREATE_CQ_RESP;
324 resp->cqe = cmd->cqe;
326 resp->hdr.err = create_cq_ring(PCI_DEVICE(dev), &ring, cmd->pdir_dma,
327 cmd->nchunks, cmd->cqe);
328 if (resp->hdr.err) {
329 goto out;
332 pr_dbg("ring=%p\n", ring);
334 resp->hdr.err = rdma_rm_alloc_cq(&dev->rdma_dev_res, &dev->backend_dev,
335 cmd->cqe, &resp->cq_handle, ring);
336 resp->cqe = cmd->cqe;
338 out:
339 pr_dbg("ret=%d\n", resp->hdr.err);
340 return resp->hdr.err;
343 static int destroy_cq(PVRDMADev *dev, union pvrdma_cmd_req *req,
344 union pvrdma_cmd_resp *rsp)
346 struct pvrdma_cmd_destroy_cq *cmd = &req->destroy_cq;
347 RdmaRmCQ *cq;
348 PvrdmaRing *ring;
350 pr_dbg("cq_handle=%d\n", cmd->cq_handle);
352 cq = rdma_rm_get_cq(&dev->rdma_dev_res, cmd->cq_handle);
353 if (!cq) {
354 pr_dbg("Invalid CQ handle\n");
355 return -EINVAL;
358 ring = (PvrdmaRing *)cq->opaque;
359 pvrdma_ring_free(ring);
360 /* ring_state was in slot 1, not 0 so need to jump back */
361 rdma_pci_dma_unmap(PCI_DEVICE(dev), --ring->ring_state, TARGET_PAGE_SIZE);
362 g_free(ring);
364 rdma_rm_dealloc_cq(&dev->rdma_dev_res, cmd->cq_handle);
366 return 0;
369 static int create_qp_rings(PCIDevice *pci_dev, uint64_t pdir_dma,
370 PvrdmaRing **rings, uint32_t scqe, uint32_t smax_sge,
371 uint32_t spages, uint32_t rcqe, uint32_t rmax_sge,
372 uint32_t rpages)
374 uint64_t *dir = NULL, *tbl = NULL;
375 PvrdmaRing *sr, *rr;
376 int rc = -EINVAL;
377 char ring_name[MAX_RING_NAME_SZ];
378 uint32_t wqe_sz;
380 pr_dbg("pdir_dma=0x%llx\n", (long long unsigned int)pdir_dma);
381 dir = rdma_pci_dma_map(pci_dev, pdir_dma, TARGET_PAGE_SIZE);
382 if (!dir) {
383 pr_dbg("Failed to map to CQ page directory\n");
384 goto out;
387 tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE);
388 if (!tbl) {
389 pr_dbg("Failed to map to CQ page table\n");
390 goto out;
393 sr = g_malloc(2 * sizeof(*rr));
394 rr = &sr[1];
395 pr_dbg("sring=%p\n", sr);
396 pr_dbg("rring=%p\n", rr);
398 *rings = sr;
400 pr_dbg("scqe=%d\n", scqe);
401 pr_dbg("smax_sge=%d\n", smax_sge);
402 pr_dbg("spages=%d\n", spages);
403 pr_dbg("rcqe=%d\n", rcqe);
404 pr_dbg("rmax_sge=%d\n", rmax_sge);
405 pr_dbg("rpages=%d\n", rpages);
407 /* Create send ring */
408 sr->ring_state = (struct pvrdma_ring *)
409 rdma_pci_dma_map(pci_dev, tbl[0], TARGET_PAGE_SIZE);
410 if (!sr->ring_state) {
411 pr_dbg("Failed to map to CQ ring state\n");
412 goto out_free_sr_mem;
415 wqe_sz = pow2ceil(sizeof(struct pvrdma_sq_wqe_hdr) +
416 sizeof(struct pvrdma_sge) * smax_sge - 1);
418 sprintf(ring_name, "qp_sring_%" PRIx64, pdir_dma);
419 rc = pvrdma_ring_init(sr, ring_name, pci_dev, sr->ring_state,
420 scqe, wqe_sz, (dma_addr_t *)&tbl[1], spages);
421 if (rc) {
422 goto out_unmap_ring_state;
425 /* Create recv ring */
426 rr->ring_state = &sr->ring_state[1];
427 wqe_sz = pow2ceil(sizeof(struct pvrdma_rq_wqe_hdr) +
428 sizeof(struct pvrdma_sge) * rmax_sge - 1);
429 sprintf(ring_name, "qp_rring_%" PRIx64, pdir_dma);
430 rc = pvrdma_ring_init(rr, ring_name, pci_dev, rr->ring_state,
431 rcqe, wqe_sz, (dma_addr_t *)&tbl[1 + spages], rpages);
432 if (rc) {
433 goto out_free_sr;
436 goto out;
438 out_free_sr:
439 pvrdma_ring_free(sr);
441 out_unmap_ring_state:
442 rdma_pci_dma_unmap(pci_dev, sr->ring_state, TARGET_PAGE_SIZE);
444 out_free_sr_mem:
445 g_free(sr);
447 out:
448 rdma_pci_dma_unmap(pci_dev, tbl, TARGET_PAGE_SIZE);
449 rdma_pci_dma_unmap(pci_dev, dir, TARGET_PAGE_SIZE);
451 return rc;
454 static int create_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
455 union pvrdma_cmd_resp *rsp)
457 struct pvrdma_cmd_create_qp *cmd = &req->create_qp;
458 struct pvrdma_cmd_create_qp_resp *resp = &rsp->create_qp_resp;
459 PvrdmaRing *rings = NULL;
461 memset(resp, 0, sizeof(*resp));
462 resp->hdr.response = cmd->hdr.response;
463 resp->hdr.ack = PVRDMA_CMD_CREATE_QP_RESP;
465 pr_dbg("total_chunks=%d\n", cmd->total_chunks);
466 pr_dbg("send_chunks=%d\n", cmd->send_chunks);
468 resp->hdr.err = create_qp_rings(PCI_DEVICE(dev), cmd->pdir_dma, &rings,
469 cmd->max_send_wr, cmd->max_send_sge,
470 cmd->send_chunks, cmd->max_recv_wr,
471 cmd->max_recv_sge, cmd->total_chunks -
472 cmd->send_chunks - 1);
473 if (resp->hdr.err) {
474 goto out;
477 pr_dbg("rings=%p\n", rings);
479 resp->hdr.err = rdma_rm_alloc_qp(&dev->rdma_dev_res, cmd->pd_handle,
480 cmd->qp_type, cmd->max_send_wr,
481 cmd->max_send_sge, cmd->send_cq_handle,
482 cmd->max_recv_wr, cmd->max_recv_sge,
483 cmd->recv_cq_handle, rings, &resp->qpn);
485 resp->max_send_wr = cmd->max_send_wr;
486 resp->max_recv_wr = cmd->max_recv_wr;
487 resp->max_send_sge = cmd->max_send_sge;
488 resp->max_recv_sge = cmd->max_recv_sge;
489 resp->max_inline_data = cmd->max_inline_data;
491 out:
492 pr_dbg("ret=%d\n", resp->hdr.err);
493 return resp->hdr.err;
496 static int modify_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
497 union pvrdma_cmd_resp *rsp)
499 struct pvrdma_cmd_modify_qp *cmd = &req->modify_qp;
501 pr_dbg("qp_handle=%d\n", cmd->qp_handle);
503 memset(rsp, 0, sizeof(*rsp));
504 rsp->hdr.response = cmd->hdr.response;
505 rsp->hdr.ack = PVRDMA_CMD_MODIFY_QP_RESP;
507 /* No need to verify sgid_index since it is u8 */
509 rsp->hdr.err =
510 rdma_rm_modify_qp(&dev->rdma_dev_res, &dev->backend_dev, cmd->qp_handle,
511 cmd->attr_mask, cmd->attrs.ah_attr.grh.sgid_index,
512 (union ibv_gid *)&cmd->attrs.ah_attr.grh.dgid,
513 cmd->attrs.dest_qp_num,
514 (enum ibv_qp_state)cmd->attrs.qp_state,
515 cmd->attrs.qkey, cmd->attrs.rq_psn,
516 cmd->attrs.sq_psn);
518 pr_dbg("ret=%d\n", rsp->hdr.err);
519 return rsp->hdr.err;
522 static int query_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
523 union pvrdma_cmd_resp *rsp)
525 struct pvrdma_cmd_query_qp *cmd = &req->query_qp;
526 struct pvrdma_cmd_query_qp_resp *resp = &rsp->query_qp_resp;
527 struct ibv_qp_init_attr init_attr;
529 pr_dbg("qp_handle=%d\n", cmd->qp_handle);
530 pr_dbg("attr_mask=0x%x\n", cmd->attr_mask);
532 memset(rsp, 0, sizeof(*rsp));
533 rsp->hdr.response = cmd->hdr.response;
534 rsp->hdr.ack = PVRDMA_CMD_QUERY_QP_RESP;
536 rsp->hdr.err = rdma_rm_query_qp(&dev->rdma_dev_res, &dev->backend_dev,
537 cmd->qp_handle,
538 (struct ibv_qp_attr *)&resp->attrs,
539 cmd->attr_mask, &init_attr);
541 pr_dbg("ret=%d\n", rsp->hdr.err);
542 return rsp->hdr.err;
545 static int destroy_qp(PVRDMADev *dev, union pvrdma_cmd_req *req,
546 union pvrdma_cmd_resp *rsp)
548 struct pvrdma_cmd_destroy_qp *cmd = &req->destroy_qp;
549 RdmaRmQP *qp;
550 PvrdmaRing *ring;
552 qp = rdma_rm_get_qp(&dev->rdma_dev_res, cmd->qp_handle);
553 if (!qp) {
554 pr_dbg("Invalid QP handle\n");
555 return -EINVAL;
558 rdma_rm_dealloc_qp(&dev->rdma_dev_res, cmd->qp_handle);
560 ring = (PvrdmaRing *)qp->opaque;
561 pr_dbg("sring=%p\n", &ring[0]);
562 pvrdma_ring_free(&ring[0]);
563 pr_dbg("rring=%p\n", &ring[1]);
564 pvrdma_ring_free(&ring[1]);
566 rdma_pci_dma_unmap(PCI_DEVICE(dev), ring->ring_state, TARGET_PAGE_SIZE);
567 g_free(ring);
569 return 0;
572 static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
573 union pvrdma_cmd_resp *rsp)
575 struct pvrdma_cmd_create_bind *cmd = &req->create_bind;
576 int rc;
577 union ibv_gid *gid = (union ibv_gid *)&cmd->new_gid;
579 pr_dbg("index=%d\n", cmd->index);
581 if (cmd->index >= MAX_PORT_GIDS) {
582 return -EINVAL;
585 pr_dbg("gid[%d]=0x%llx,0x%llx\n", cmd->index,
586 (long long unsigned int)be64_to_cpu(gid->global.subnet_prefix),
587 (long long unsigned int)be64_to_cpu(gid->global.interface_id));
589 rc = rdma_rm_add_gid(&dev->rdma_dev_res, &dev->backend_dev,
590 dev->backend_eth_device_name, gid, cmd->index);
591 if (rc < 0) {
592 return -EINVAL;
595 return 0;
598 static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
599 union pvrdma_cmd_resp *rsp)
601 int rc;
603 struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind;
605 pr_dbg("index=%d\n", cmd->index);
607 if (cmd->index >= MAX_PORT_GIDS) {
608 return -EINVAL;
611 rc = rdma_rm_del_gid(&dev->rdma_dev_res, &dev->backend_dev,
612 dev->backend_eth_device_name, cmd->index);
614 if (rc < 0) {
615 rsp->hdr.err = rc;
616 goto out;
619 return 0;
622 static int create_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
623 union pvrdma_cmd_resp *rsp)
625 struct pvrdma_cmd_create_uc *cmd = &req->create_uc;
626 struct pvrdma_cmd_create_uc_resp *resp = &rsp->create_uc_resp;
628 pr_dbg("pfn=%d\n", cmd->pfn);
630 memset(resp, 0, sizeof(*resp));
631 resp->hdr.response = cmd->hdr.response;
632 resp->hdr.ack = PVRDMA_CMD_CREATE_UC_RESP;
633 resp->hdr.err = rdma_rm_alloc_uc(&dev->rdma_dev_res, cmd->pfn,
634 &resp->ctx_handle);
636 pr_dbg("ret=%d\n", resp->hdr.err);
638 return 0;
641 static int destroy_uc(PVRDMADev *dev, union pvrdma_cmd_req *req,
642 union pvrdma_cmd_resp *rsp)
644 struct pvrdma_cmd_destroy_uc *cmd = &req->destroy_uc;
646 pr_dbg("ctx_handle=%d\n", cmd->ctx_handle);
648 rdma_rm_dealloc_uc(&dev->rdma_dev_res, cmd->ctx_handle);
650 return 0;
652 struct cmd_handler {
653 uint32_t cmd;
654 int (*exec)(PVRDMADev *dev, union pvrdma_cmd_req *req,
655 union pvrdma_cmd_resp *rsp);
658 static struct cmd_handler cmd_handlers[] = {
659 {PVRDMA_CMD_QUERY_PORT, query_port},
660 {PVRDMA_CMD_QUERY_PKEY, query_pkey},
661 {PVRDMA_CMD_CREATE_PD, create_pd},
662 {PVRDMA_CMD_DESTROY_PD, destroy_pd},
663 {PVRDMA_CMD_CREATE_MR, create_mr},
664 {PVRDMA_CMD_DESTROY_MR, destroy_mr},
665 {PVRDMA_CMD_CREATE_CQ, create_cq},
666 {PVRDMA_CMD_RESIZE_CQ, NULL},
667 {PVRDMA_CMD_DESTROY_CQ, destroy_cq},
668 {PVRDMA_CMD_CREATE_QP, create_qp},
669 {PVRDMA_CMD_MODIFY_QP, modify_qp},
670 {PVRDMA_CMD_QUERY_QP, query_qp},
671 {PVRDMA_CMD_DESTROY_QP, destroy_qp},
672 {PVRDMA_CMD_CREATE_UC, create_uc},
673 {PVRDMA_CMD_DESTROY_UC, destroy_uc},
674 {PVRDMA_CMD_CREATE_BIND, create_bind},
675 {PVRDMA_CMD_DESTROY_BIND, destroy_bind},
678 int execute_command(PVRDMADev *dev)
680 int err = 0xFFFF;
681 DSRInfo *dsr_info;
683 dsr_info = &dev->dsr_info;
685 pr_dbg("cmd=%d\n", dsr_info->req->hdr.cmd);
686 if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) /
687 sizeof(struct cmd_handler)) {
688 pr_dbg("Unsupported command\n");
689 goto out;
692 if (!cmd_handlers[dsr_info->req->hdr.cmd].exec) {
693 pr_dbg("Unsupported command (not implemented yet)\n");
694 goto out;
697 err = cmd_handlers[dsr_info->req->hdr.cmd].exec(dev, dsr_info->req,
698 dsr_info->rsp);
699 out:
700 set_reg_val(dev, PVRDMA_REG_ERR, err);
701 post_interrupt(dev, INTR_VEC_CMD_RING);
703 return (err == 0) ? 0 : -EINVAL;