hw/net/xen_nic.c

/*
 *  xen paravirt network card backend
 *
 *  (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; under version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 *  Contributions after 2012-01-13 are licensed under the terms of the
 *  GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/wait.h>

#include "net/net.h"
#include "net/checksum.h"
#include "net/util.h"
#include "hw/xen/xen-legacy-backend.h"

#include "hw/xen/interface/io/netif.h"

/* ------------------------------------------------------------- */

struct XenNetDev {
    struct XenLegacyDevice xendev;  /* must be first */
    char                   *mac;
    int                    tx_work;
    int                    tx_ring_ref;
    int                    rx_ring_ref;
    struct netif_tx_sring  *txs;
    struct netif_rx_sring  *rxs;
    netif_tx_back_ring_t   tx_ring;
    netif_rx_back_ring_t   rx_ring;
    NICConf                conf;
    NICState               *nic;
};

/* ------------------------------------------------------------- */

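/*
 * Queue a response for a completed tx request on the shared ring and
 * notify the frontend via the event channel if it asked for that.
 */
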
static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, int8_t st)
{
    RING_IDX i = netdev->tx_ring.rsp_prod_pvt;
    netif_tx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->tx_ring, i);
    resp->id     = txp->id;
    resp->status = st;

#if 0
    if (txp->flags & NETTXF_extra_info) {
        RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL;
    }
#endif

    netdev->tx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify);
    if (notify) {
        xen_pv_send_notify(&netdev->xendev);
    }

    if (i == netdev->tx_ring.req_cons) {
        int more_to_do;
        RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do);
        if (more_to_do) {
            netdev->tx_work++;
        }
    }
}

static void net_tx_error(struct XenNetDev *netdev, netif_tx_request_t *txp, RING_IDX end)
{
#if 0
    /*
     * Hmm, why does netback fail everything in the ring?
     * Should we do that even when not supporting SG and TSO?
     */
    RING_IDX cons = netdev->tx_ring.req_cons;

    do {
        make_tx_response(netif, txp, NETIF_RSP_ERROR);
        if (cons >= end) {
            break;
        }
        txp = RING_GET_REQUEST(&netdev->tx_ring, cons++);
    } while (1);
    netdev->tx_ring.req_cons = cons;
    netif_schedule_work(netif);
    netif_put(netif);
#else
    net_tx_response(netdev, txp, NETIF_RSP_ERROR);
#endif
}

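/*
 * Drain the frontend's tx ring: for each request, map the granted page
 * read-only, hand the frame to QEMU's net layer with qemu_send_packet(),
 * then unmap the page and push a response back onto the ring.
 */
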
static void net_tx_packets(struct XenNetDev *netdev)
{
    netif_tx_request_t txreq;
    RING_IDX rc, rp;
    void *page;
    void *tmpbuf = NULL;

    for (;;) {
        rc = netdev->tx_ring.req_cons;
        rp = netdev->tx_ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        while (rc != rp) {
            if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc)) {
                break;
            }
            memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq));
            netdev->tx_ring.req_cons = ++rc;

#if 1
            /*
             * Should not happen in theory: we don't announce the
             * feature-{sg,gso,...} flags in xenstore (yet?).
             */
            if (txreq.flags & NETTXF_extra_info) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: extra info flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_more_data) {
                xen_pv_printf(&netdev->xendev, 0, "FIXME: more data flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
#endif

            /* frame must at least contain an Ethernet header */
            if (txreq.size < 14) {
                xen_pv_printf(&netdev->xendev, 0, "bad packet size: %d\n",
                              txreq.size);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
                xen_pv_printf(&netdev->xendev, 0, "error: page crossing\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            xen_pv_printf(&netdev->xendev, 3,
                          "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n",
                          txreq.gref, txreq.offset, txreq.size, txreq.flags,
                          (txreq.flags & NETTXF_csum_blank)     ? " csum_blank"     : "",
                          (txreq.flags & NETTXF_data_validated) ? " data_validated" : "",
                          (txreq.flags & NETTXF_more_data)      ? " more_data"      : "",
                          (txreq.flags & NETTXF_extra_info)     ? " extra_info"     : "");

            page = xen_be_map_grant_ref(&netdev->xendev, txreq.gref,
                                        PROT_READ);
            if (page == NULL) {
                xen_pv_printf(&netdev->xendev, 0,
                              "error: tx gref dereference failed (%d)\n",
                              txreq.gref);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_csum_blank) {
                /* have read-only mapping -> can't fill checksum in-place */
                if (!tmpbuf) {
                    tmpbuf = g_malloc(XC_PAGE_SIZE);
                }
                memcpy(tmpbuf, page + txreq.offset, txreq.size);
                net_checksum_calculate(tmpbuf, txreq.size);
                qemu_send_packet(qemu_get_queue(netdev->nic), tmpbuf,
                                 txreq.size);
            } else {
                qemu_send_packet(qemu_get_queue(netdev->nic),
                                 page + txreq.offset, txreq.size);
            }
            xen_be_unmap_grant_ref(&netdev->xendev, page);
            net_tx_response(netdev, &txreq, NETIF_RSP_OKAY);
        }
        if (!netdev->tx_work) {
            break;
        }
        netdev->tx_work = 0;
    }
    g_free(tmpbuf);
}

/* ------------------------------------------------------------- */

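/*
 * Queue a response on the rx ring for the given request and kick the
 * frontend's event channel if it is waiting for one.
 */
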
static void net_rx_response(struct XenNetDev *netdev,
                            netif_rx_request_t *req, int8_t st,
                            uint16_t offset, uint16_t size,
                            uint16_t flags)
{
    RING_IDX i = netdev->rx_ring.rsp_prod_pvt;
    netif_rx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->rx_ring, i);
    resp->offset = offset;
    resp->flags  = flags;
    resp->id     = req->id;
    resp->status = (int16_t)size;
    if (st < 0) {
        resp->status = (int16_t)st;
    }

    xen_pv_printf(&netdev->xendev, 3,
                  "rx response: idx %d, status %d, flags 0x%x\n",
                  i, resp->status, resp->flags);

    netdev->rx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify);
    if (notify) {
        xen_pv_send_notify(&netdev->xendev);
    }
}

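/*
 * Copy an incoming frame into the page granted by the frontend.  The
 * payload is shifted by NET_IP_ALIGN bytes so that the IP header that
 * follows the 14-byte Ethernet header ends up 4-byte aligned.
 */
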
#define NET_IP_ALIGN  2

static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size)
{
    struct XenNetDev *netdev = qemu_get_nic_opaque(nc);
    netif_rx_request_t rxreq;
    RING_IDX rc, rp;
    void *page;

    if (netdev->xendev.be_state != XenbusStateConnected) {
        return -1;
    }

    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
        return 0;
    }
    if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
        xen_pv_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
                      (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
        return -1;
    }

    memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq));
    netdev->rx_ring.req_cons = ++rc;

    page = xen_be_map_grant_ref(&netdev->xendev, rxreq.gref, PROT_WRITE);
    if (page == NULL) {
        xen_pv_printf(&netdev->xendev, 0,
                      "error: rx gref dereference failed (%d)\n",
                      rxreq.gref);
        net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0);
        return -1;
    }
    memcpy(page + NET_IP_ALIGN, buf, size);
    xen_be_unmap_grant_ref(&netdev->xendev, page);
    net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0);

    return size;
}

/* ------------------------------------------------------------- */

static NetClientInfo net_xen_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .receive = net_rx_packet,
};

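/*
 * Backend "init" hook: read the MAC address from xenstore, create the
 * QEMU NIC, and advertise feature-rx-copy to the frontend.
 */
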
static int net_init(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    /* read xenstore entries */
    if (netdev->mac == NULL) {
        netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac");
    }

    /* do we have all we need? */
    if (netdev->mac == NULL) {
        return -1;
    }

    if (net_parse_macaddr(netdev->conf.macaddr.a, netdev->mac) < 0) {
        return -1;
    }

    netdev->nic = qemu_new_nic(&net_xen_info, &netdev->conf,
                               "xen", NULL, netdev);

    snprintf(qemu_get_queue(netdev->nic)->info_str,
             sizeof(qemu_get_queue(netdev->nic)->info_str),
             "nic: xenbus vif macaddr=%s", netdev->mac);

    /* fill info */
    xenstore_write_be_int(&netdev->xendev, "feature-rx-copy", 1);
    xenstore_write_be_int(&netdev->xendev, "feature-rx-flip", 0);

    return 0;
}

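/*
 * Backend "initialise" hook: read the ring references and event channel
 * published by the frontend, map both shared rings, and bind the event
 * channel.  Only rx-copy capable frontends are supported.
 */
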
static int net_connect(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    int rx_copy;

    if (xenstore_read_fe_int(&netdev->xendev, "tx-ring-ref",
                             &netdev->tx_ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&netdev->xendev, "rx-ring-ref",
                             &netdev->rx_ring_ref) == -1) {
        return -1;
    }
    if (xenstore_read_fe_int(&netdev->xendev, "event-channel",
                             &netdev->xendev.remote_port) == -1) {
        return -1;
    }

    if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1) {
        rx_copy = 0;
    }
    if (rx_copy == 0) {
        xen_pv_printf(&netdev->xendev, 0,
                      "frontend doesn't support rx-copy.\n");
        return -1;
    }

    netdev->txs = xen_be_map_grant_ref(&netdev->xendev,
                                       netdev->tx_ring_ref,
                                       PROT_READ | PROT_WRITE);
    if (!netdev->txs) {
        return -1;
    }
    netdev->rxs = xen_be_map_grant_ref(&netdev->xendev,
                                       netdev->rx_ring_ref,
                                       PROT_READ | PROT_WRITE);
    if (!netdev->rxs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
        netdev->txs = NULL;
        return -1;
    }
    BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE);
    BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE);

    xen_be_bind_evtchn(&netdev->xendev);

    xen_pv_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, "
                  "remote port %d, local port %d\n",
                  netdev->tx_ring_ref, netdev->rx_ring_ref,
                  netdev->xendev.remote_port, netdev->xendev.local_port);

    net_tx_packets(netdev);
    return 0;
}

static void net_disconnect(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    xen_pv_unbind_evtchn(&netdev->xendev);

    if (netdev->txs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->txs);
        netdev->txs = NULL;
    }
    if (netdev->rxs) {
        xen_be_unmap_grant_ref(&netdev->xendev, netdev->rxs);
        netdev->rxs = NULL;
    }
}

static void net_event(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    net_tx_packets(netdev);
    qemu_flush_queued_packets(qemu_get_queue(netdev->nic));
}

static int net_free(struct XenLegacyDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    if (netdev->nic) {
        qemu_del_nic(netdev->nic);
        netdev->nic = NULL;
    }
    g_free(netdev->mac);
    netdev->mac = NULL;
    return 0;
}

/* ------------------------------------------------------------- */

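/*
 * Hook table used by the legacy Xen backend core to drive this device
 * through its xenbus state machine.
 */
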
struct XenDevOps xen_netdev_ops = {
    .size       = sizeof(struct XenNetDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .init       = net_init,
    .initialise = net_connect,
    .event      = net_event,
    .disconnect = net_disconnect,
    .free       = net_free,
};