/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "monitor/monitor.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))
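
/* For example, endof(struct virtio_net_config, mac) evaluates to the offset
 * of the 'mac' field plus its size, i.e. the number of config bytes a guest
 * needs to see when only VIRTIO_NET_F_MAC is negotiated. */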

typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};
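
/* virtio_net_set_config_size() below walks this zero-terminated table and
 * sets config_size to the largest 'end' whose feature bit is present in the
 * host features, so the visible config space only covers negotiated fields. */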

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}
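
/* Virtqueues come in RX/TX pairs: queue pair i uses virtqueue index 2*i for
 * RX and 2*i+1 for TX, with the control queue added after all data queues.
 * vq2q() therefore maps a virtqueue index back to its queue pair. */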

static int vq2q(int queue_index)
{
    return queue_index / 2;
}

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    stw_p(&netcfg.status, n->status);
    stw_p(&netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!nc->peer) {
        return;
    }
    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    if (!tap_get_vhost_net(nc->peer)) {
        return;
    }

    if (!!n->vhost_started ==
        (virtio_net_started(n, status) && !nc->peer->link_down)) {
        return;
    }
    if (!n->vhost_started) {
        int r;
        if (!vhost_net_query(tap_get_vhost_net(nc->peer), vdev)) {
            return;
        }
        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down) {
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    } else {
        n->status |= VIRTIO_NET_S_LINK_UP;
    }

    if (n->status != old_status) {
        virtio_notify_config(vdev);
    }

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    QObject *event_data;
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        if (n->netclient_name) {
            event_data = qobject_from_jsonf("{ 'name': %s, 'path': %s }",
                                            n->netclient_name, path);
        } else {
            event_data = qobject_from_jsonf("{ 'path': %s }", path);
        }
        monitor_protocol_event(QEVENT_NIC_RX_FILTER_CHANGED, event_data);
        qobject_decref(event_data);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

static char *mac_strdup_printf(const uint8_t *mac)
{
    return g_strdup_printf("%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", mac[0],
                           mac[1], mac[2], mac[3], mac[4], mac[5]);
}
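
/* VLAN filtering uses a bitmap of MAX_VLAN (4096) bits stored in 32-bit
 * words: VLAN id 'vid' lives in word vid >> 5, bit vid & 0x1f.
 * get_vlan_table() walks that bitmap and virtio_net_handle_vlan_table()
 * updates it. */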

static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}

static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!((1 << VIRTIO_NET_F_CTRL_VLAN) & vdev->guest_features)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n)) {
        return 0;
    }

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return features;
    }
    if (!tap_get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(tap_get_vhost_net(nc->peer), features);
}

static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)));

    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
            continue;
        }
        if (!tap_get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(tap_get_vhost_net(nc->peer), features);
    }

    if ((1 << VIRTIO_NET_F_CTRL_VLAN) & features) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = lduw_p(&vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD) {
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    } else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL) {
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = lduw_p(&mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov = elem.out_sg;
        iov_cnt = elem.out_num;
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
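
/* The byte offsets checked below assume an untagged Ethernet frame with a
 * 20-byte IPv4 header (no options): bytes 12-13 are the ethertype, byte 23
 * the IP protocol, and bytes 34-35 the UDP source port (67 == bootps). */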

static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc) {
        return 1;
    }

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f)))) {
            return 0;
        }
    }

    if (ptr[0] & 1) { /* multicast */
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { /* unicast */
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size)) {
        return size;
    }

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            if (i == 0) {
                return -1;
            }
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len, vdev->guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
#if 0
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
#endif
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        stw_p(&mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}

static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

/* TX */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    assert(vdev->vm_running);

    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(vdev, q->tx_vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}
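
/* TX mitigation with tx=timer: the first guest kick only arms tx_timer and
 * disables further queue notifications; if the guest kicks again before the
 * timeout, the queue is flushed immediately, otherwise the timer callback
 * flushes it once n->tx_timeout nanoseconds have passed. */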

static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready anymore */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready anymore */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;

    for (i = 2; i <= n->max_queues * 2 + 1; i++) {
        virtio_del_queue(vdev, i);
    }

    for (i = 1; i < max; i++) {
        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
        if (n->vqs[i].tx_timer) {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[i]);
        } else {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
        }

        n->vqs[i].tx_waiting = 0;
        n->vqs[i].n = n;
    }

    /* Note: Minix guests (version 3.2.1) use the ctrl vq but don't ack
     * VIRTIO_NET_F_CTRL_VQ.  Create the ctrl vq unconditionally to avoid
     * breaking them.
     */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);

    virtio_net_set_queues(n);
}

static void virtio_net_save(QEMUFile *f, void *opaque)
{
    int i;
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(vdev, f);

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        qemu_put_be64(f, n->curr_guest_offloads);
    }
}

static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int ret, i, link_down;

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION) {
        return -EINVAL;
    }

    ret = virtio_load(vdev, f);
    if (ret) {
        return ret;
    }

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));

    if (version_id >= 3) {
        n->status = qemu_get_be16(f);
    }

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else if (n->mac_table.in_use) {
            uint8_t *buf = g_malloc0(n->mac_table.in_use);
            qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN);
            g_free(buf);
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6) {
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    }

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    if (n->max_queues > 1) {
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        n->curr_guest_offloads = qemu_get_be64(f);
    } else {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    return 0;
}

static void virtio_net_cleanup(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    n->nic = NULL;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(tap_get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(tap_get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
{
    int i, config_size = 0;
    host_features |= (1 << VIRTIO_NET_F_MAC);
    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}

void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, the netclient name will be type.x.
     */
    assert(type != NULL);

    if (n->netclient_name) {
        g_free(n->netclient_name);
        n->netclient_name = NULL;
    }
    if (n->netclient_type) {
        g_free(n->netclient_type);
        n->netclient_type = NULL;
    }

    if (name != NULL) {
        n->netclient_name = g_strdup(name);
    }
    n->netclient_type = g_strdup(type);
}

static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    n->max_queues = MAX(n->nic_conf.queues, 1);
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
    n->curr_queues = 1;
    n->vqs[0].n = n;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_timer);
        n->vqs[0].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, virtio_net_tx_timer,
                                          &n->vqs[0]);
    } else {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_bh);
        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
    }
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);

    add_boot_device_path(n->nic_conf.bootindex, dev, "/ethernet-phy@0");
}

static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(dev, "virtio-net", n);

    if (n->netclient_name) {
        g_free(n->netclient_name);
        n->netclient_name = NULL;
    }
    if (n->netclient_type) {
        g_free(n->netclient_type);
        n->netclient_type = NULL;
    }

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        if (q->tx_timer) {
            timer_del(q->tx_timer);
            timer_free(q->tx_timer);
        } else if (q->tx_bh) {
            qemu_bh_delete(q->tx_bh);
        }
    }

    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
}

static Property virtio_net_properties[] = {
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)