qemu:virtio-net: Enable filtering based on MAC, promisc, broadcast and allmulti ...
[qemu.git] / hw / virtio-net.c
blob 001169d7a226bc36fd431ac702e43fda1df874ae
/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "virtio.h"
#include "net.h"
#include "qemu-timer.h"
#include "virtio-net.h"

#define VIRTIO_NET_VM_VERSION 4

typedef struct VirtIONet
{
    VirtIODevice vdev;
    uint8_t mac[ETH_ALEN];
    uint16_t status;
    VirtQueue *rx_vq;
    VirtQueue *tx_vq;
    VirtQueue *ctrl_vq;
    VLANClientState *vc;
    QEMUTimer *tx_timer;
    int tx_timer_active;
    int mergeable_rx_bufs;
    int promisc;
    int allmulti;
} VirtIONet;
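
/* promisc and allmulti hold the receive-filter state toggled by the guest
 * through the control virtqueue (VIRTIO_NET_CTRL_RX_MODE_* commands); they
 * are consulted in receive_filter() below and migrated from savevm version 4
 * onwards. */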

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static VirtIONet *to_virtio_net(VirtIODevice *vdev)
{
    return (VirtIONet *)vdev;
}

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_config netcfg;

    netcfg.status = n->status;
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, sizeof(netcfg));
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_config netcfg;

    memcpy(&netcfg, config, sizeof(netcfg));

    if (memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(n->vc, n->mac);
    }
}

static void virtio_net_set_link_status(VLANClientState *vc)
{
    VirtIONet *n = vc->opaque;
    uint16_t old_status = n->status;

    if (vc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(&n->vdev);
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = to_virtio_net(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
}

static uint32_t virtio_net_get_features(VirtIODevice *vdev)
{
    uint32_t features = (1 << VIRTIO_NET_F_MAC) |
                        (1 << VIRTIO_NET_F_STATUS) |
                        (1 << VIRTIO_NET_F_CTRL_VQ);

    return features;
}

static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = to_virtio_net(vdev);

    n->mergeable_rx_bufs = !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF));
}
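
/* An RX mode command is expected to arrive as exactly two "out" descriptors:
 * the control header (class/cmd) followed by a single byte that switches the
 * named mode on or off, as enforced by the checks below. */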
static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     VirtQueueElement *elem)
{
    uint8_t on;

    if (elem->out_num != 2 || elem->out_sg[1].iov_len != sizeof(on)) {
        fprintf(stderr, "virtio-net ctrl invalid rx mode command\n");
        exit(1);
    }

    on = ldub_p(elem->out_sg[1].iov_base);

    if (cmd == VIRTIO_NET_CTRL_RX_MODE_PROMISC)
        n->promisc = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_ALLMULTI)
        n->allmulti = on;
    else
        return VIRTIO_NET_ERR;

    return VIRTIO_NET_OK;
}
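
/* Each control virtqueue request consists of "out" buffers carrying a
 * virtio_net_ctrl_hdr (class and command) plus any command arguments, and at
 * least one "in" buffer; the device writes a one-byte VIRTIO_NET_OK /
 * VIRTIO_NET_ERR ack into the final "in" buffer. */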
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;

    while (virtqueue_pop(vq, &elem)) {
        if ((elem.in_num < 1) || (elem.out_num < 1)) {
            fprintf(stderr, "virtio-net ctrl missing headers\n");
            exit(1);
        }

        if (elem.out_sg[0].iov_len < sizeof(ctrl) ||
            elem.in_sg[elem.in_num - 1].iov_len < sizeof(status)) {
            fprintf(stderr, "virtio-net ctrl header not in correct element\n");
            exit(1);
        }

        ctrl.class = ldub_p(elem.out_sg[0].iov_base);
        ctrl.cmd = ldub_p(elem.out_sg[0].iov_base + sizeof(ctrl.class));

        if (ctrl.class == VIRTIO_NET_CTRL_RX_MODE)
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, &elem);

        stb_p(elem.in_sg[elem.in_num - 1].iov_base, status);

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
}
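
/* RX buffers are only consumed from virtio_net_receive(); the helper below
 * decides whether a packet of the given size can be accepted right now and,
 * as a side effect, re-enables guest notifications on the RX queue whenever
 * we have to back off because no (suitably sized) buffers are available. */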
static int do_virtio_net_can_receive(VirtIONet *n, int bufsize)
{
    if (!virtio_queue_ready(n->rx_vq) ||
        !(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return 0;

    if (virtio_queue_empty(n->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(n->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(n->rx_vq, 1);
        return 0;
    }

    virtio_queue_set_notification(n->rx_vq, 0);
    return 1;
}

static int virtio_net_can_receive(void *opaque)
{
    VirtIONet *n = opaque;

    return do_virtio_net_can_receive(n, VIRTIO_NET_MAX_BUFSIZE);
}

static int iov_fill(struct iovec *iov, int iovcnt, const void *buf, int count)
{
    int offset, i;

    offset = i = 0;
    while (offset < count && i < iovcnt) {
        int len = MIN(iov[i].iov_len, count - offset);
        memcpy(iov[i].iov_base, buf + offset, len);
        offset += len;
        i++;
    }

    return offset;
}

static int receive_header(VirtIONet *n, struct iovec *iov, int iovcnt,
                          const void *buf, size_t size, size_t hdr_len)
{
    struct virtio_net_hdr *hdr = iov[0].iov_base;
    int offset = 0;

    hdr->flags = 0;
    hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

    /* We only ever receive a struct virtio_net_hdr from the tapfd,
     * but we may be passing along a larger header to the guest.
     */
    iov[0].iov_base += hdr_len;
    iov[0].iov_len  -= hdr_len;

    return offset;
}
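
/* Receive filtering for the rx-mode control commands: accept everything in
 * promiscuous mode, accept multicast frames only when allmulti is set, and
 * always accept broadcasts and frames addressed to our own MAC. */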
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    uint8_t *ptr = (uint8_t *)buf;

    if (n->promisc)
        return 1;

#ifdef TAP_VNET_HDR
    if (tap_has_vnet_hdr(n->vc->vlan->first_client))
        ptr += sizeof(struct virtio_net_hdr);
#endif

    if ((ptr[0] & 1) && n->allmulti)
        return 1;

    if (!memcmp(ptr, bcast, sizeof(bcast)))
        return 1;

    if (!memcmp(ptr, n->mac, ETH_ALEN))
        return 1;

    return 0;
}
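
/* Copy one packet from the host into guest RX buffers.  Without mergeable RX
 * buffers the whole packet must fit into a single descriptor chain; with
 * VIRTIO_NET_F_MRG_RXBUF it may be spread across several chains, and the
 * number of chains used is written back into the first header's num_buffers
 * field once the packet is complete. */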
static void virtio_net_receive(void *opaque, const uint8_t *buf, int size)
{
    VirtIONet *n = opaque;
    struct virtio_net_hdr_mrg_rxbuf *mhdr = NULL;
    size_t hdr_len, offset, i;

    if (!do_virtio_net_can_receive(n, size))
        return;

    if (!receive_filter(n, buf, size))
        return;

    /* hdr_len refers to the header we supply to the guest */
    hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        len = total = 0;

        if ((i != 0 && !n->mergeable_rx_bufs) ||
            virtqueue_pop(n->rx_vq, &elem) == 0) {
            if (i == 0)
                return;
            fprintf(stderr, "virtio-net truncating packet\n");
            exit(1);
        }

        if (elem.in_num < 1) {
            fprintf(stderr, "virtio-net receive queue contains no in buffers\n");
            exit(1);
        }

        if (!n->mergeable_rx_bufs && elem.in_sg[0].iov_len != hdr_len) {
            fprintf(stderr, "virtio-net header not in first element\n");
            exit(1);
        }

        memcpy(&sg, &elem.in_sg[0], sizeof(sg[0]) * elem.in_num);

        if (i == 0) {
            if (n->mergeable_rx_bufs)
                mhdr = (struct virtio_net_hdr_mrg_rxbuf *)sg[0].iov_base;

            offset += receive_header(n, sg, elem.in_num,
                                     buf + offset, size - offset, hdr_len);
            total += hdr_len;
        }

        /* copy in packet.  ugh */
        len = iov_fill(sg, elem.in_num,
                       buf + offset, size - offset);
        total += len;

        /* signal other side */
        virtqueue_fill(n->rx_vq, &elem, total, i++);

        offset += len;
    }

    if (mhdr)
        mhdr->num_buffers = i;

    virtqueue_flush(n->rx_vq, i);
    virtio_notify(&n->vdev, n->rx_vq);
}

/* TX */
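
/* Drain the TX queue: each element starts with the virtio_net header supplied
 * by the guest.  When the backend has no vnet header support the header is
 * skipped entirely; with mergeable RX buffers only the plain virtio_net_hdr
 * part is kept, since that is what the tap fd expects. */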
static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
{
    VirtQueueElement elem;
    int has_vnet_hdr = 0;

    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    while (virtqueue_pop(vq, &elem)) {
        ssize_t len = 0;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        unsigned hdr_len;

        /* hdr_len refers to the header received from the guest */
        hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);

        if (out_num < 1 || out_sg->iov_len != hdr_len) {
            fprintf(stderr, "virtio-net header not in first element\n");
            exit(1);
        }

        /* ignore the header if GSO is not supported */
        if (!has_vnet_hdr) {
            out_num--;
            out_sg++;
            len += hdr_len;
        } else if (n->mergeable_rx_bufs) {
            /* tapfd expects a struct virtio_net_hdr */
            hdr_len -= sizeof(struct virtio_net_hdr);
            out_sg->iov_len -= hdr_len;
            len += hdr_len;
        }

        len += qemu_sendv_packet(n->vc, out_sg, out_num);

        virtqueue_push(vq, &elem, len);
        virtio_notify(&n->vdev, vq);
    }
}
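
/* TX notifications are mitigated with a timer: the first kick arms the timer
 * and disables further queue notifications; the next kick (or the timer
 * firing) flushes the accumulated packets, so bursts of transmits are
 * coalesced into fewer flushes. */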
static void virtio_net_handle_tx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);

    if (n->tx_timer_active) {
        virtio_queue_set_notification(vq, 1);
        qemu_del_timer(n->tx_timer);
        n->tx_timer_active = 0;
        virtio_net_flush_tx(n, vq);
    } else {
        qemu_mod_timer(n->tx_timer,
                       qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL);
        n->tx_timer_active = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONet *n = opaque;

    n->tx_timer_active = 0;

    /* Just in case the driver is not ready any more */
    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    virtio_queue_set_notification(n->tx_vq, 1);
    virtio_net_flush_tx(n, n->tx_vq);
}
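
/* Migration state: savevm version 4 (VIRTIO_NET_VM_VERSION) appends the
 * promisc and allmulti flags after the fields saved by earlier versions, and
 * virtio_net_load() only reads them when the incoming version is >= 4. */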
static void virtio_net_save(QEMUFile *f, void *opaque)
{
    VirtIONet *n = opaque;

    virtio_save(&n->vdev, f);

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->tx_timer_active);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_be32(f, n->promisc);
    qemu_put_be32(f, n->allmulti);
}

static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    virtio_load(&n->vdev, f);

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->tx_timer_active = qemu_get_be32(f);
    n->mergeable_rx_bufs = qemu_get_be32(f);

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        n->promisc = qemu_get_be32(f);
        n->allmulti = qemu_get_be32(f);
    }

    if (n->tx_timer_active) {
        qemu_mod_timer(n->tx_timer,
                       qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL);
    }

    return 0;
}

void virtio_net_init(PCIBus *bus, NICInfo *nd, int devfn)
{
    VirtIONet *n;
    static int virtio_net_id;

    n = (VirtIONet *)virtio_init_pci(bus, "virtio-net",
                                     PCI_VENDOR_ID_REDHAT_QUMRANET,
                                     PCI_DEVICE_ID_VIRTIO_NET,
                                     PCI_VENDOR_ID_REDHAT_QUMRANET,
                                     VIRTIO_ID_NET,
                                     PCI_CLASS_NETWORK_ETHERNET, 0x00,
                                     sizeof(struct virtio_net_config),
                                     sizeof(VirtIONet));
    if (!n)
        return;

    n->vdev.get_config = virtio_net_get_config;
    n->vdev.set_config = virtio_net_set_config;
    n->vdev.get_features = virtio_net_get_features;
    n->vdev.set_features = virtio_net_set_features;
    n->vdev.reset = virtio_net_reset;
    n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
    n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx);
    n->ctrl_vq = virtio_add_queue(&n->vdev, 16, virtio_net_handle_ctrl);
    memcpy(n->mac, nd->macaddr, ETH_ALEN);
    n->status = VIRTIO_NET_S_LINK_UP;
    n->vc = qemu_new_vlan_client(nd->vlan, nd->model, nd->name,
                                 virtio_net_receive, virtio_net_can_receive, n);
    n->vc->link_status_changed = virtio_net_set_link_status;

    qemu_format_nic_info_str(n->vc, n->mac);

    n->tx_timer = qemu_new_timer(vm_clock, virtio_net_tx_timer, n);
    n->tx_timer_active = 0;
    n->mergeable_rx_bufs = 0;
    n->promisc = 1; /* for compatibility */

    register_savevm("virtio-net", virtio_net_id++, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);
}