/*
 * netmap access for qemu
 *
 * Copyright (c) 2012-2013 Luigi Rizzo
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>
#include <net/if.h>
#include <sys/mman.h>
#define NETMAP_WITH_LIBS
#include <net/netmap.h>
#include <net/netmap_user.h>

#include "net/net.h"
#include "net/tap.h"
#include "clients.h"
#include "sysemu/sysemu.h"
#include "qemu/error-report.h"
#include "qemu/iov.h"
typedef struct NetmapState {
    NetClientState nc;
    struct nm_desc *nmd;
    char ifname[IFNAMSIZ];
    struct netmap_ring *tx;    /* TX ring: guest/QEMU --> netmap port. */
    struct netmap_ring *rx;    /* RX ring: netmap port --> guest/QEMU. */
    bool read_poll;            /* True if the read handler is registered. */
    bool write_poll;           /* True if the write handler is registered. */
    struct iovec iov[IOV_MAX]; /* Scratch iovec used by netmap_send(). */
    int vnet_hdr_len;          /* Current virtio-net header length. */
} NetmapState;
#ifndef __FreeBSD__
#define pkt_copy bcopy
#else
/* A fast copy routine only for multiples of 64 bytes, non-overlapped. */
static inline void
pkt_copy(const void *_src, void *_dst, int l)
{
    const uint64_t *src = _src;
    uint64_t *dst = _dst;

    /* For large packets, fall back to the system copy routine. */
    if (unlikely(l >= 1024)) {
        bcopy(src, dst, l);
        return;
    }
    /* Copy 64 bytes (eight 64-bit words) per iteration. */
    for (; l > 0; l -= 64) {
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
        *dst++ = *src++;
    }
}
#endif /* __FreeBSD__ */
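
/*
 * Note: when 'l' is not a multiple of 64, the unrolled loop above rounds
 * the copy up to the next 64-byte boundary, so callers are assumed to
 * tolerate up to 63 bytes of overrun on both buffers (netmap slot
 * buffers are expected to provide that headroom).
 */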
/*
 * Open a netmap device. We assume there is only one queue
 * (which is the case for the VALE bridge).
 */
static struct nm_desc *netmap_open(const NetdevNetmapOptions *nm_opts,
                                   Error **errp)
{
    struct nm_desc *nmd;
    struct nmreq req;

    memset(&req, 0, sizeof(req));

    nmd = nm_open(nm_opts->ifname, &req, NETMAP_NO_TX_POLL,
                  NULL);
    if (nmd == NULL) {
        error_setg_errno(errp, errno, "Failed to nm_open() %s",
                         nm_opts->ifname);
        return NULL;
    }

    return nmd;
}
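
/*
 * A note on naming (based on the netmap API, not stated in this file):
 * nm_open() accepts port names such as "netmap:eth0" for a host
 * interface or "vale0:1" for a port of a VALE software switch, so the
 * ifname option is passed through verbatim.
 */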
static void netmap_send(void *opaque);
static void netmap_writable(void *opaque);

/* Set the event-loop handlers for the netmap backend. */
static void netmap_update_fd_handler(NetmapState *s)
{
    qemu_set_fd_handler(s->nmd->fd,
                        s->read_poll ? netmap_send : NULL,
                        s->write_poll ? netmap_writable : NULL,
                        s);
}
/* Update the read handler. */
static void netmap_read_poll(NetmapState *s, bool enable)
{
    if (s->read_poll != enable) { /* Do nothing if not changed. */
        s->read_poll = enable;
        netmap_update_fd_handler(s);
    }
}

/* Update the write handler. */
static void netmap_write_poll(NetmapState *s, bool enable)
{
    if (s->write_poll != enable) {
        s->write_poll = enable;
        netmap_update_fd_handler(s);
    }
}
static void netmap_poll(NetClientState *nc, bool enable)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);

    if (s->read_poll != enable || s->write_poll != enable) {
        s->write_poll = enable;
        s->read_poll = enable;
        netmap_update_fd_handler(s);
    }
}
/*
 * The fd_write() callback, invoked if the fd is marked as
 * writable after a poll. Unregister the handler and flush any
 * buffered packets.
 */
static void netmap_writable(void *opaque)
{
    NetmapState *s = opaque;

    netmap_write_poll(s, false);
    qemu_flush_queued_packets(&s->nc);
}
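
/*
 * Together with netmap_receive()/netmap_receive_iov() below, this
 * implements TX flow control: when the netmap TX ring is full, the
 * receive callbacks return 0 and enable write polling, QEMU queues the
 * packet, and once the fd becomes writable netmap_writable() disables
 * write polling and flushes the queued packets.
 */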
static ssize_t netmap_receive(NetClientState *nc,
                              const uint8_t *buf, size_t size)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    struct netmap_ring *ring = s->tx;
    uint32_t i;
    uint32_t idx;
    uint8_t *dst;

    if (unlikely(!ring)) {
        /* Drop. */
        return size;
    }

    if (unlikely(size > ring->nr_buf_size)) {
        RD(5, "[netmap_receive] drop packet of size %d > %d\n",
           (int)size, ring->nr_buf_size);
        return size;
    }

    if (nm_ring_empty(ring)) {
        /* No available slots in the netmap TX ring. */
        netmap_write_poll(s, true);
        return 0;
    }

    i = ring->cur;
    idx = ring->slot[i].buf_idx;
    dst = (uint8_t *)NETMAP_BUF(ring, idx);

    ring->slot[i].len = size;
    ring->slot[i].flags = 0;
    pkt_copy(buf, dst, size);
    ring->cur = ring->head = nm_ring_next(ring, i);
    ioctl(s->nmd->fd, NIOCTXSYNC, NULL);

    return size;
}
static ssize_t netmap_receive_iov(NetClientState *nc,
                                  const struct iovec *iov, int iovcnt)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    struct netmap_ring *ring = s->tx;
    uint32_t last;
    uint32_t idx;
    uint8_t *dst;
    int j;
    uint32_t i;

    if (unlikely(!ring)) {
        /* Drop the packet. */
        return iov_size(iov, iovcnt);
    }

    last = i = ring->cur;

    if (nm_ring_space(ring) < iovcnt) {
        /* Not enough netmap slots. */
        netmap_write_poll(s, true);
        return 0;
    }

    for (j = 0; j < iovcnt; j++) {
        int iov_frag_size = iov[j].iov_len;
        int offset = 0;
        int nm_frag_size;

        /* Split each iovec fragment across multiple netmap slots, if
           necessary. */
        while (iov_frag_size) {
            nm_frag_size = MIN(iov_frag_size, ring->nr_buf_size);

            if (unlikely(nm_ring_empty(ring))) {
                /* We ran out of netmap slots while splitting the
                   iovec fragments. */
                netmap_write_poll(s, true);
                return 0;
            }

            idx = ring->slot[i].buf_idx;
            dst = (uint8_t *)NETMAP_BUF(ring, idx);

            ring->slot[i].len = nm_frag_size;
            ring->slot[i].flags = NS_MOREFRAG;
            pkt_copy(iov[j].iov_base + offset, dst, nm_frag_size);

            last = i;
            i = nm_ring_next(ring, i);

            offset += nm_frag_size;
            iov_frag_size -= nm_frag_size;
        }
    }
    /* The last slot must not have NS_MOREFRAG set. */
    ring->slot[last].flags &= ~NS_MOREFRAG;

    /* Now update ring->cur and ring->head. */
    ring->cur = ring->head = i;

    ioctl(s->nmd->fd, NIOCTXSYNC, NULL);

    return iov_size(iov, iovcnt);
}
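
/*
 * Fragmentation protocol: every slot of a multi-slot packet carries
 * NS_MOREFRAG except the last one, which has it cleared above.
 * netmap_send() below performs the inverse reassembly when reading
 * packets from the RX ring.
 */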
/* Complete a previous send (backend --> guest) and enable the
   fd_read callback. */
static void netmap_send_completed(NetClientState *nc, ssize_t len)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);

    netmap_read_poll(s, true);
}
static void netmap_send(void *opaque)
{
    NetmapState *s = opaque;
    struct netmap_ring *ring = s->rx;

    /* Keep sending while there are available packets in the netmap
       RX ring and the forwarding path towards the peer is open. */
    while (!nm_ring_empty(ring)) {
        uint32_t i;
        uint32_t idx;
        bool morefrag;
        int iovcnt = 0;
        int iovsize;

        /* Collect all the slots of the next packet; the fragments of a
         * multi-slot packet are chained together by NS_MOREFRAG. */
        do {
            i = ring->cur;
            idx = ring->slot[i].buf_idx;
            morefrag = (ring->slot[i].flags & NS_MOREFRAG);
            s->iov[iovcnt].iov_base = (u_char *)NETMAP_BUF(ring, idx);
            s->iov[iovcnt].iov_len = ring->slot[i].len;
            iovcnt++;

            ring->cur = ring->head = nm_ring_next(ring, i);
        } while (!nm_ring_empty(ring) && morefrag);

        if (unlikely(nm_ring_empty(ring) && morefrag)) {
            RD(5, "[netmap_send] ran out of slots, with a pending "
                  "incomplete packet\n");
        }

        iovsize = qemu_sendv_packet_async(&s->nc, s->iov, iovcnt,
                                          netmap_send_completed);

        if (iovsize == 0) {
            /* The peer cannot receive any more packets. The packet has
             * been queued, so stop reading from the backend until
             * netmap_send_completed() is invoked. */
            netmap_read_poll(s, false);
            break;
        }
    }
}
/* Flush and close. */
static void netmap_cleanup(NetClientState *nc)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);

    qemu_purge_queued_packets(nc);

    netmap_poll(nc, false);
    nm_close(s->nmd);
    s->nmd = NULL;
}
/* Offloading manipulation support callbacks. */
static int netmap_fd_set_vnet_hdr_len(NetmapState *s, int len)
{
    struct nmreq req;

    /* Issue a NETMAP_BDG_VNET_HDR command to change the virtio-net header
     * length for the netmap adapter associated with 's->ifname'.
     */
    memset(&req, 0, sizeof(req));
    pstrcpy(req.nr_name, sizeof(req.nr_name), s->ifname);
    req.nr_version = NETMAP_API;
    req.nr_cmd = NETMAP_BDG_VNET_HDR;
    req.nr_arg1 = len;

    return ioctl(s->nmd->fd, NIOCREGIF, &req);
}
static bool netmap_has_vnet_hdr_len(NetClientState *nc, int len)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    int prev_len = s->vnet_hdr_len;

    /* Check that we can set the new length. */
    if (netmap_fd_set_vnet_hdr_len(s, len)) {
        return false;
    }

    /* Restore the previous length. */
    if (netmap_fd_set_vnet_hdr_len(s, prev_len)) {
        error_report("Failed to restore vnet-hdr length %d on %s: %s",
                     prev_len, s->ifname, strerror(errno));
        abort();
    }

    return true;
}
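
/*
 * Design note: support for a given header length is probed by actually
 * setting it and then restoring the previous value, which assumes the
 * temporary change is harmless while no traffic is flowing (presumably
 * because this netmap API version has no query-only command).
 */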
/* A netmap interface that supports virtio-net headers always
 * supports UFO, so we use this callback also for the has_ufo hook. */
static bool netmap_has_vnet_hdr(NetClientState *nc)
{
    return netmap_has_vnet_hdr_len(nc, sizeof(struct virtio_net_hdr));
}

static void netmap_using_vnet_hdr(NetClientState *nc, bool enable)
{
}
static void netmap_set_vnet_hdr_len(NetClientState *nc, int len)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);
    int err;

    err = netmap_fd_set_vnet_hdr_len(s, len);
    if (err) {
        error_report("Unable to set vnet-hdr length %d on %s: %s",
                     len, s->ifname, strerror(errno));
    } else {
        /* Keep track of the current length. */
        s->vnet_hdr_len = len;
    }
}
static void netmap_set_offload(NetClientState *nc, int csum, int tso4, int tso6,
                               int ecn, int ufo)
{
    NetmapState *s = DO_UPCAST(NetmapState, nc, nc);

    /* Setting a virtio-net header length greater than zero automatically
     * enables the offloads. */
    if (!s->vnet_hdr_len) {
        netmap_set_vnet_hdr_len(nc, sizeof(struct virtio_net_hdr));
    }
}
/* NetClientInfo methods */
static NetClientInfo net_netmap_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NETMAP,
    .size = sizeof(NetmapState),
    .receive = netmap_receive,
    .receive_iov = netmap_receive_iov,
    .poll = netmap_poll,
    .cleanup = netmap_cleanup,
    .has_ufo = netmap_has_vnet_hdr,
    .has_vnet_hdr = netmap_has_vnet_hdr,
    .has_vnet_hdr_len = netmap_has_vnet_hdr_len,
    .using_vnet_hdr = netmap_using_vnet_hdr,
    .set_offload = netmap_set_offload,
    .set_vnet_hdr_len = netmap_set_vnet_hdr_len,
};
/* The exported init function
 *
 * ... -net netmap,ifname="..."
 */
int net_init_netmap(const NetClientOptions *opts,
                    const char *name, NetClientState *peer, Error **errp)
{
    const NetdevNetmapOptions *netmap_opts = opts->u.netmap;
    struct nm_desc *nmd;
    NetClientState *nc;
    Error *err = NULL;
    NetmapState *s;

    nmd = netmap_open(netmap_opts, &err);
    if (err) {
        error_propagate(errp, err);
        return -1;
    }
    /* Create the object. */
    nc = qemu_new_net_client(&net_netmap_info, peer, "netmap", name);
    s = DO_UPCAST(NetmapState, nc, nc);
    s->nmd = nmd;
    s->tx = NETMAP_TXRING(nmd->nifp, 0);
    s->rx = NETMAP_RXRING(nmd->nifp, 0);
    s->vnet_hdr_len = 0;
    pstrcpy(s->ifname, sizeof(s->ifname), netmap_opts->ifname);
    netmap_read_poll(s, true); /* Initially only poll for reads. */

    return 0;
}
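
/*
 * Example invocation (illustrative; this file only documents the legacy
 * "-net netmap,ifname=..." form above, the -netdev spelling is assumed):
 *
 *   qemu-system-x86_64 ... \
 *       -netdev netmap,id=nm0,ifname=vale0:1 \
 *       -device virtio-net-pci,netdev=nm0
 *
 * where "vale0:1" attaches the guest NIC to port 1 of the VALE software
 * switch "vale0".
 */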