hw/xen_nic.c

/*
 * xen paravirt network card backend
 *
 * (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include <inttypes.h>
#include <fcntl.h>
#include <errno.h>
#include <pthread.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/wait.h>

#include <xs.h>
#include <xenctrl.h>
#include <xen/io/xenbus.h>
#include <xen/io/netif.h>

#include "hw.h"
#include "net.h"
#include "qemu-char.h"
#include "xen_backend.h"

/* ------------------------------------------------------------- */

struct XenNetDev {
    struct XenDevice      xendev;  /* must be first */
    char                  *mac;
    int                   tx_work;
    int                   tx_ring_ref;
    int                   rx_ring_ref;
    struct netif_tx_sring *txs;
    struct netif_rx_sring *rxs;
    netif_tx_back_ring_t  tx_ring;
    netif_rx_back_ring_t  rx_ring;
    VLANClientState       *vs;
};

/* ------------------------------------------------------------- */

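/*
 * Queue one tx response on the shared ring and notify the frontend if
 * it asked for an event.  Once responses have caught up with req_cons,
 * do a final check for requests that raced in, so the caller's loop in
 * net_tx_packets() runs another round.
 */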
static void net_tx_response(struct XenNetDev *netdev, netif_tx_request_t *txp, int8_t st)
{
    RING_IDX i = netdev->tx_ring.rsp_prod_pvt;
    netif_tx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->tx_ring, i);
    resp->id     = txp->id;
    resp->status = st;

#if 0
    if (txp->flags & NETTXF_extra_info)
        RING_GET_RESPONSE(&netdev->tx_ring, ++i)->status = NETIF_RSP_NULL;
#endif

    netdev->tx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->tx_ring, notify);
    if (notify)
        xen_be_send_notify(&netdev->xendev);

    if (i == netdev->tx_ring.req_cons) {
        int more_to_do;
        RING_FINAL_CHECK_FOR_REQUESTS(&netdev->tx_ring, more_to_do);
        if (more_to_do)
            netdev->tx_work++;
    }
}

static void net_tx_error(struct XenNetDev *netdev, netif_tx_request_t *txp, RING_IDX end)
{
#if 0
    /*
     * Hmm, why does netback fail everything in the ring?
     * Should we do that even when not supporting SG and TSO?
     */
    RING_IDX cons = netdev->tx_ring.req_cons;

    do {
        make_tx_response(netif, txp, NETIF_RSP_ERROR);
        if (cons >= end)
            break;
        txp = RING_GET_REQUEST(&netdev->tx_ring, cons++);
    } while (1);
    netdev->tx_ring.req_cons = cons;
    netif_schedule_work(netif);
    netif_put(netif);
#else
    net_tx_response(netdev, txp, NETIF_RSP_ERROR);
#endif
}

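/*
 * Drain the tx ring: pull each request, map the granted page read-only,
 * hand the frame to the qemu net layer, then unmap the page and post a
 * response.  The outer loop re-runs whenever net_tx_response() spotted
 * late-arriving requests and bumped tx_work.
 */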
static void net_tx_packets(struct XenNetDev *netdev)
{
    netif_tx_request_t txreq;
    RING_IDX rc, rp;
    void *page;
    void *tmpbuf = NULL;

    for (;;) {
        rc = netdev->tx_ring.req_cons;
        rp = netdev->tx_ring.sring->req_prod;
        xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

        while ((rc != rp)) {
            if (RING_REQUEST_CONS_OVERFLOW(&netdev->tx_ring, rc))
                break;
            memcpy(&txreq, RING_GET_REQUEST(&netdev->tx_ring, rc), sizeof(txreq));
            netdev->tx_ring.req_cons = ++rc;

#if 1
            /* should not happen in theory, we don't announce the *
             * feature-{sg,gso,whatelse} flags in xenstore (yet?) */
            if (txreq.flags & NETTXF_extra_info) {
                xen_be_printf(&netdev->xendev, 0, "FIXME: extra info flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_more_data) {
                xen_be_printf(&netdev->xendev, 0, "FIXME: more data flag\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
#endif

            if (txreq.size < 14) {
                xen_be_printf(&netdev->xendev, 0, "bad packet size: %d\n", txreq.size);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            if ((txreq.offset + txreq.size) > XC_PAGE_SIZE) {
                xen_be_printf(&netdev->xendev, 0, "error: page crossing\n");
                net_tx_error(netdev, &txreq, rc);
                continue;
            }

            xen_be_printf(&netdev->xendev, 3,
                          "tx packet ref %d, off %d, len %d, flags 0x%x%s%s%s%s\n",
                          txreq.gref, txreq.offset, txreq.size, txreq.flags,
                          (txreq.flags & NETTXF_csum_blank)     ? " csum_blank"     : "",
                          (txreq.flags & NETTXF_data_validated) ? " data_validated" : "",
                          (txreq.flags & NETTXF_more_data)      ? " more_data"      : "",
                          (txreq.flags & NETTXF_extra_info)     ? " extra_info"     : "");

            page = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                           netdev->xendev.dom,
                                           txreq.gref, PROT_READ);
            if (page == NULL) {
                xen_be_printf(&netdev->xendev, 0,
                              "error: tx gref dereference failed (%d)\n",
                              txreq.gref);
                net_tx_error(netdev, &txreq, rc);
                continue;
            }
            if (txreq.flags & NETTXF_csum_blank) {
                /* have read-only mapping -> can't fill checksum in-place */
                if (!tmpbuf)
                    tmpbuf = qemu_malloc(XC_PAGE_SIZE);
                memcpy(tmpbuf, page + txreq.offset, txreq.size);
                net_checksum_calculate(tmpbuf, txreq.size);
                qemu_send_packet(netdev->vs, tmpbuf, txreq.size);
            } else {
                qemu_send_packet(netdev->vs, page + txreq.offset, txreq.size);
            }
            xc_gnttab_munmap(netdev->xendev.gnttabdev, page, 1);
            net_tx_response(netdev, &txreq, NETIF_RSP_OKAY);
        }
        if (!netdev->tx_work)
            break;
        netdev->tx_work = 0;
    }
    qemu_free(tmpbuf);
}

/* ------------------------------------------------------------- */

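/*
 * Receive path.  In rx-copy mode the frontend posts empty buffers as
 * grant references on the rx ring; we map each buffer writable, copy
 * the packet in and complete the request with a response.
 */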
static void net_rx_response(struct XenNetDev *netdev,
                            netif_rx_request_t *req, int8_t st,
                            uint16_t offset, uint16_t size,
                            uint16_t flags)
{
    RING_IDX i = netdev->rx_ring.rsp_prod_pvt;
    netif_rx_response_t *resp;
    int notify;

    resp = RING_GET_RESPONSE(&netdev->rx_ring, i);
    resp->offset = offset;
    resp->flags  = flags;
    resp->id     = req->id;
    resp->status = (int16_t)size;
    if (st < 0)
        resp->status = (int16_t)st;

    xen_be_printf(&netdev->xendev, 3, "rx response: idx %d, status %d, flags 0x%x\n",
                  i, resp->status, resp->flags);

    netdev->rx_ring.rsp_prod_pvt = ++i;
    RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netdev->rx_ring, notify);
    if (notify)
        xen_be_send_notify(&netdev->xendev);
}

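/*
 * Copying the frame in at offset 2 pushes the 14-byte ethernet header
 * so that the IP header behind it lands on a 4-byte boundary (the same
 * NET_IP_ALIGN trick the Linux network stack uses).
 */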
#define NET_IP_ALIGN 2

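/*
 * Flow-control hook for the qemu net layer: accept packets only while
 * the frontend is connected and has rx buffers queued.
 */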
static int net_rx_ok(VLANClientState *vc)
{
    struct XenNetDev *netdev = vc->opaque;
    RING_IDX rc, rp;

    if (netdev->xendev.be_state != XenbusStateConnected)
        return 0;

    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb();

    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
        xen_be_printf(&netdev->xendev, 2, "%s: no rx buffers (%d/%d)\n",
                      __FUNCTION__, rc, rp);
        return 0;
    }
    return 1;
}

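/*
 * Deliver one packet from qemu to the guest: consume one rx request,
 * map the granted buffer writable, copy the frame in at NET_IP_ALIGN
 * and post the response.
 */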
static ssize_t net_rx_packet(VLANClientState *vc, const uint8_t *buf, size_t size)
{
    struct XenNetDev *netdev = vc->opaque;
    netif_rx_request_t rxreq;
    RING_IDX rc, rp;
    void *page;

    if (netdev->xendev.be_state != XenbusStateConnected)
        return -1;

    rc = netdev->rx_ring.req_cons;
    rp = netdev->rx_ring.sring->req_prod;
    xen_rmb(); /* Ensure we see queued requests up to 'rp'. */

    if (rc == rp || RING_REQUEST_CONS_OVERFLOW(&netdev->rx_ring, rc)) {
        xen_be_printf(&netdev->xendev, 2, "no buffer, drop packet\n");
        return -1;
    }
    if (size > XC_PAGE_SIZE - NET_IP_ALIGN) {
        xen_be_printf(&netdev->xendev, 0, "packet too big (%lu > %ld)",
                      (unsigned long)size, XC_PAGE_SIZE - NET_IP_ALIGN);
        return -1;
    }

    memcpy(&rxreq, RING_GET_REQUEST(&netdev->rx_ring, rc), sizeof(rxreq));
    netdev->rx_ring.req_cons = ++rc;

    page = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                   netdev->xendev.dom,
                                   rxreq.gref, PROT_WRITE);
    if (page == NULL) {
        xen_be_printf(&netdev->xendev, 0, "error: rx gref dereference failed (%d)\n",
                      rxreq.gref);
        net_rx_response(netdev, &rxreq, NETIF_RSP_ERROR, 0, 0, 0);
        return -1;
    }
    memcpy(page + NET_IP_ALIGN, buf, size);
    xc_gnttab_munmap(netdev->xendev.gnttabdev, page, 1);
    net_rx_response(netdev, &rxreq, NETIF_RSP_OKAY, NET_IP_ALIGN, size, 0);

    return size;
}

/* ------------------------------------------------------------- */

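/*
 * Backend lifecycle, driven by the generic xen backend code: init reads
 * our own xenstore config and advertises features, connect maps the
 * rings once the frontend has published them, disconnect and free tear
 * everything down again.
 */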
static int net_init(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    VLANState *vlan;

    /* read xenstore entries */
    if (netdev->mac == NULL)
        netdev->mac = xenstore_read_be_str(&netdev->xendev, "mac");

    /* do we have all we need? */
    if (netdev->mac == NULL)
        return -1;

    vlan = qemu_find_vlan(netdev->xendev.dev, 1);
    netdev->vs = qemu_new_vlan_client(vlan, "xen", NULL,
                                      net_rx_ok, net_rx_packet, NULL,
                                      NULL, netdev);
    snprintf(netdev->vs->info_str, sizeof(netdev->vs->info_str),
             "nic: xenbus vif macaddr=%s", netdev->mac);

    /* fill info */
    xenstore_write_be_int(&netdev->xendev, "feature-rx-copy", 1);
    xenstore_write_be_int(&netdev->xendev, "feature-rx-flip", 0);

    return 0;
}

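/*
 * Called when the frontend switches to Connected: pick up the grant
 * references and event channel it published, map the two shared rings
 * and bind the event channel.  Only the rx-copy protocol is supported,
 * so frontends without request-rx-copy are rejected.
 */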
static int net_connect(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    int rx_copy;

    if (xenstore_read_fe_int(&netdev->xendev, "tx-ring-ref",
                             &netdev->tx_ring_ref) == -1)
        return -1;
    if (xenstore_read_fe_int(&netdev->xendev, "rx-ring-ref",
                             &netdev->rx_ring_ref) == -1)
        return -1;
    if (xenstore_read_fe_int(&netdev->xendev, "event-channel",
                             &netdev->xendev.remote_port) == -1)
        return -1;

    if (xenstore_read_fe_int(&netdev->xendev, "request-rx-copy", &rx_copy) == -1)
        rx_copy = 0;
    if (rx_copy == 0) {
        xen_be_printf(&netdev->xendev, 0, "frontend doesn't support rx-copy.\n");
        return -1;
    }

    netdev->txs = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                          netdev->xendev.dom,
                                          netdev->tx_ring_ref,
                                          PROT_READ | PROT_WRITE);
    netdev->rxs = xc_gnttab_map_grant_ref(netdev->xendev.gnttabdev,
                                          netdev->xendev.dom,
                                          netdev->rx_ring_ref,
                                          PROT_READ | PROT_WRITE);
    if (!netdev->txs || !netdev->rxs)
        return -1;
    BACK_RING_INIT(&netdev->tx_ring, netdev->txs, XC_PAGE_SIZE);
    BACK_RING_INIT(&netdev->rx_ring, netdev->rxs, XC_PAGE_SIZE);

    xen_be_bind_evtchn(&netdev->xendev);

    xen_be_printf(&netdev->xendev, 1, "ok: tx-ring-ref %d, rx-ring-ref %d, "
                  "remote port %d, local port %d\n",
                  netdev->tx_ring_ref, netdev->rx_ring_ref,
                  netdev->xendev.remote_port, netdev->xendev.local_port);

    net_tx_packets(netdev);
    return 0;
}

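/*
 * Undo net_connect(): unbind the event channel and unmap the shared
 * rings.  Each pointer is checked and cleared, so calling this on a
 * half-connected device is safe.
 */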
static void net_disconnect(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    xen_be_unbind_evtchn(&netdev->xendev);

    if (netdev->txs) {
        xc_gnttab_munmap(netdev->xendev.gnttabdev, netdev->txs, 1);
        netdev->txs = NULL;
    }
    if (netdev->rxs) {
        xc_gnttab_munmap(netdev->xendev.gnttabdev, netdev->rxs, 1);
        netdev->rxs = NULL;
    }
    if (netdev->vs) {
        qemu_del_vlan_client(netdev->vs);
        netdev->vs = NULL;
    }
}

static void net_event(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);
    net_tx_packets(netdev);
}

static int net_free(struct XenDevice *xendev)
{
    struct XenNetDev *netdev = container_of(xendev, struct XenNetDev, xendev);

    qemu_free(netdev->mac);
    return 0;
}

/* ------------------------------------------------------------- */

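/*
 * Hooked into the generic xen backend core; DEVOPS_FLAG_NEED_GNTDEV
 * tells it this driver needs a grant-table handle (xendev.gnttabdev).
 */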
struct XenDevOps xen_netdev_ops = {
    .size       = sizeof(struct XenNetDev),
    .flags      = DEVOPS_FLAG_NEED_GNTDEV,
    .init       = net_init,
    .connect    = net_connect,
    .event      = net_event,
    .disconnect = net_disconnect,
    .free       = net_free,
};