pvrdma: check return value from pvrdma_idx_ring_has_ routines
[qemu/ar7.git] hw/rdma/vmw/pvrdma_dev_ring.c

/*
 * QEMU paravirtual RDMA - Device rings
 *
 * Copyright (C) 2018 Oracle
 * Copyright (C) 2018 Red Hat Inc
 *
 * Authors:
 *     Yuval Shaia <yuval.shaia@oracle.com>
 *     Marcel Apfelbaum <marcel@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "hw/pci/pci.h"
#include "cpu.h"

#include "../rdma_utils.h"
#include "standard-headers/drivers/infiniband/hw/vmw_pvrdma/pvrdma_ring.h"
#include "pvrdma_dev_ring.h"
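
/*
 * Map a device ring into host memory. tbl holds the guest physical
 * addresses of the ring's npages pages; each page is DMA-mapped and
 * zeroed. A NULL entry in tbl is reported and skipped. On a mapping
 * failure, the pages mapped so far are unmapped and -ENOMEM is returned.
 */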
int pvrdma_ring_init(PvrdmaRing *ring, const char *name, PCIDevice *dev,
                     struct pvrdma_ring *ring_state, uint32_t max_elems,
                     size_t elem_sz, dma_addr_t *tbl, uint32_t npages)
{
    int i;
    int rc = 0;

    strncpy(ring->name, name, MAX_RING_NAME_SZ);
    ring->name[MAX_RING_NAME_SZ - 1] = 0;
    pr_dbg("Initializing %s ring\n", ring->name);
    ring->dev = dev;
    ring->ring_state = ring_state;
    ring->max_elems = max_elems;
    ring->elem_sz = elem_sz;
    pr_dbg("ring->elem_sz=%zu\n", ring->elem_sz);
    pr_dbg("npages=%d\n", npages);
    /* TODO: Give a moment to think if we want to redo driver settings
    atomic_set(&ring->ring_state->prod_tail, 0);
    atomic_set(&ring->ring_state->cons_head, 0);
    */
    ring->npages = npages;
    ring->pages = g_malloc(npages * sizeof(void *));

    for (i = 0; i < npages; i++) {
        if (!tbl[i]) {
            pr_err("npages=%ld but tbl[%d] is NULL\n", (long)npages, i);
            continue;
        }

        ring->pages[i] = rdma_pci_dma_map(dev, tbl[i], TARGET_PAGE_SIZE);
        if (!ring->pages[i]) {
            rc = -ENOMEM;
            pr_dbg("Failed to map to page %d\n", i);
            goto out_free;
        }
        memset(ring->pages[i], 0, TARGET_PAGE_SIZE);
    }

    goto out;

out_free:
    while (i--) {
        rdma_pci_dma_unmap(dev, ring->pages[i], TARGET_PAGE_SIZE);
    }
    g_free(ring->pages);

out:
    return rc;
}
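
/*
 * Return a pointer to the next element available for reading, or NULL
 * if pvrdma_idx_ring_has_data() reports an empty ring (or an error).
 * The element is not consumed; call pvrdma_ring_read_inc() after
 * processing it.
 */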
void *pvrdma_ring_next_elem_read(PvrdmaRing *ring)
{
    int e;
    unsigned int idx = 0, offset;

    e = pvrdma_idx_ring_has_data(ring->ring_state, ring->max_elems, &idx);
    if (e <= 0) {
        pr_dbg("No more data in ring\n");
        return NULL;
    }

    offset = idx * ring->elem_sz;
    return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
}
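
/* Consume one element by advancing the ring's consumer head. */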
void pvrdma_ring_read_inc(PvrdmaRing *ring)
{
    pvrdma_idx_ring_inc(&ring->ring_state->cons_head, ring->max_elems);
    /*
    pr_dbg("%s: t=%d, h=%d, m=%ld\n", ring->name,
           ring->ring_state->prod_tail, ring->ring_state->cons_head,
           ring->max_elems);
    */
}
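
/*
 * Return a pointer to the next free slot for writing, or NULL when the
 * ring is full or the tail reported by pvrdma_idx_ring_has_space()
 * disagrees with the producer index (a sign of corrupted ring state).
 * Call pvrdma_ring_write_inc() to publish the element.
 */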
void *pvrdma_ring_next_elem_write(PvrdmaRing *ring)
{
    int idx;
    unsigned int offset, tail;

    idx = pvrdma_idx_ring_has_space(ring->ring_state, ring->max_elems, &tail);
    if (idx <= 0) {
        pr_dbg("CQ is full\n");
        return NULL;
    }

    idx = pvrdma_idx(&ring->ring_state->prod_tail, ring->max_elems);
    if (idx < 0 || tail != idx) {
        pr_dbg("invalid idx\n");
        return NULL;
    }

    offset = idx * ring->elem_sz;
    return ring->pages[offset / TARGET_PAGE_SIZE] + (offset % TARGET_PAGE_SIZE);
}
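
/* Publish one written element by advancing the ring's producer tail. */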
void pvrdma_ring_write_inc(PvrdmaRing *ring)
{
    pvrdma_idx_ring_inc(&ring->ring_state->prod_tail, ring->max_elems);
    /*
    pr_dbg("%s: t=%d, h=%d, m=%ld\n", ring->name,
           ring->ring_state->prod_tail, ring->ring_state->cons_head,
           ring->max_elems);
    */
}
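
/*
 * Typical producer flow, as a sketch only ('cq_ring' and the element
 * type are illustrative, not defined in this file):
 *
 *     struct pvrdma_cqe *cqe = pvrdma_ring_next_elem_write(cq_ring);
 *     if (cqe) {
 *         ...fill *cqe...
 *         pvrdma_ring_write_inc(cq_ring);
 *     }
 */

/*
 * Unmap all ring pages and release the page array. Safe to call with a
 * NULL ring or a ring whose pages were never mapped.
 */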
void pvrdma_ring_free(PvrdmaRing *ring)
{
    if (!ring) {
        return;
    }

    if (!ring->pages) {
        return;
    }

    pr_dbg("ring->npages=%d\n", ring->npages);
    while (ring->npages--) {
        rdma_pci_dma_unmap(ring->dev, ring->pages[ring->npages],
                           TARGET_PAGE_SIZE);
    }

    g_free(ring->pages);
    ring->pages = NULL;
}