virtio-net: don't run bh on vm stopped
[qemu/kevin.git] / hw / net / virtio-net.c
blob 365e266b7486d6e4d765a6b665d0596cb625ed41
/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "qapi-event.h"
#include "hw/virtio/virtio-access.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))

typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

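/* rx/tx virtqueues come in pairs: virtqueues 2n and 2n + 1 both belong to
 * network queue n, so the queue index is the virtqueue index divided by 2. */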
static int vq2q(int queue_index)
{
    return queue_index / 2;
}

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}

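/* Start or stop the vhost backend so that it matches the desired device
 * state (driver ready, link up, VM running) and the peer's link state. */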
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if (!!n->vhost_started ==
        (virtio_net_started(n, status) && !nc->peer->link_down)) {
        return;
    }
    if (!n->vhost_started) {
        int r;
        if (!vhost_net_query(get_vhost_net(nc->peer), vdev)) {
            return;
        }
        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

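/* Propagate a status change to each queue: first sync the vhost backend,
 * then, for queues with tx work pending, arm the tx timer or schedule the
 * tx bottom half while the device is running, and cancel them otherwise. */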
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path,
                                              &error_abort);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

static char *mac_strdup_printf(const uint8_t *mac)
{
    return g_strdup_printf("%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", mac[0],
                           mac[1], mac[2], mac[3], mac[4], mac[5]);
}

static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}

static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!((1 << VIRTIO_NET_F_CTRL_VLAN) & vdev->guest_features)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(get_vhost_net(nc->peer), features);
}

static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)));

    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if ((1 << VIRTIO_NET_F_CTRL_VLAN) & features) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = virtio_lduw_p(vdev, &vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

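/* Dispatch control virtqueue requests: each element carries a
 * virtio_net_ctrl_hdr plus command data in its out buffers, and a one-byte
 * VIRTIO_NET_OK/VIRTIO_NET_ERR ack is written back into its in buffers. */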
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov = elem.out_sg;
        iov_cnt = elem.out_num;
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

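/* Decide whether an incoming packet is delivered to the guest, based on the
 * promiscuous flag, the VLAN table and the unicast/multicast MAC filter
 * state.  Returns 1 to accept the packet, 0 to drop it. */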
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len, vdev->guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
#if 0
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
#endif
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}

static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

/* TX */

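/* Pop up to tx_burst elements from the tx virtqueue and hand them to the
 * peer.  Returns the number of packets flushed, or -EBUSY if the backend
 * cannot take more and completion will arrive via virtio_net_tx_complete. */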
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    assert(vdev->vm_running);

    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        if (n->has_vnet_hdr) {
            if (out_sg[0].iov_len < n->guest_hdr_len) {
                error_report("virtio-net header incorrect");
                exit(1);
            }
            virtio_net_hdr_swap(vdev, (void *) out_sg[0].iov_base);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(vdev, q->tx_vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}

static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}

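/* Delete every virtqueue beyond queue 0's rx/tx pair, recreate rx/tx pairs
 * for the additional queues when multiqueue is enabled, and re-add the
 * control virtqueue last. */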
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;

    for (i = 2; i <= n->max_queues * 2 + 1; i++) {
        virtio_del_queue(vdev, i);
    }

    for (i = 1; i < max; i++) {
        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
        if (n->vqs[i].tx_timer) {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[i]);
        } else {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
        }

        n->vqs[i].tx_waiting = 0;
        n->vqs[i].n = n;
    }

    /* Note: MINIX guests (version 3.2.1) use ctrl vq but don't ack
     * VIRTIO_NET_F_CTRL_VQ. Create ctrl vq unconditionally to avoid
     * breaking them.
     */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);

    virtio_net_set_queues(n);
}

static void virtio_net_save(QEMUFile *f, void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(vdev, f);
}

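/* Device-specific migration state: MAC address, filter tables, rx mode flags
 * and the per-queue tx_waiting flags; read back by virtio_net_load_device(). */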
static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        qemu_put_be64(f, n->curr_guest_offloads);
    }
}

static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    return virtio_load(vdev, f, version_id);
}

static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i, link_down;

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else {
            int64_t i;

            /* Overflow detected - can happen if source has a larger MAC table.
             * We simply set overflow flag so there's no need to maintain the
             * table of addresses, discard them all.
             * Note: 64 bit math to avoid integer overflow.
             */
            for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
                qemu_get_byte(f);
            }
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    if (n->max_queues > 1) {
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues ");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        if (n->curr_queues > n->max_queues) {
            error_report("virtio-net: curr_queues %x > max_queues %x",
                         n->curr_queues, n->max_queues);
            return -1;
        }
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        n->curr_guest_offloads = qemu_get_be64(f);
    } else {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    if (vdev->guest_features & (0x1 << VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        vdev->guest_features & (0x1 << VIRTIO_NET_F_CTRL_VQ)) {
        n->announce_counter = SELF_ANNOUNCE_ROUNDS;
        timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
    }

    return 0;
}

static void virtio_net_cleanup(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    n->nic = NULL;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

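/* The config space only needs to cover the fields enabled by the host
 * features; use the largest end offset among the enabled entries of
 * feature_sizes[]. */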
void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
{
    int i, config_size = 0;
    host_features |= (1 << VIRTIO_NET_F_MAC);
    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}

void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, the netclient name will be type.x.
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}

static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
    n->curr_queues = 1;
    n->vqs[0].n = n;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_timer);
        n->vqs[0].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, virtio_net_tx_timer,
                                          &n->vqs[0]);
    } else {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_bh);
        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
    }
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);

    add_boot_device_path(n->nic_conf.bootindex, dev, "/ethernet-phy@0");
}

static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(dev, "virtio-net", n);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        if (q->tx_timer) {
            timer_del(q->tx_timer);
            timer_free(q->tx_timer);
        } else if (q->tx_bh) {
            qemu_bh_delete(q->tx_bh);
        }
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
}

static Property virtio_net_properties[] = {
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->load = virtio_net_load_device;
    vdc->save = virtio_net_save_device;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)