virtio: introduce device specific migration calls
qemu.git: hw/net/virtio-net.c
/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "qapi-event.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))

typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};
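
/*
 * Worked example (editor's note, not device logic): struct virtio_net_config
 * lays out mac[6], then a 16-bit status, then a 16-bit max_virtqueue_pairs,
 * so endof(struct virtio_net_config, mac) == 6, and negotiating
 * VIRTIO_NET_F_STATUS as well extends the guest-visible config space through
 * endof(struct virtio_net_config, status) == 8.  virtio_net_set_config_size()
 * below takes the maximum such 'end' over all enabled feature bits.
 */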
static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

static int vq2q(int queue_index)
{
    return queue_index / 2;
}
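
/*
 * Editor's note: virtqueues come in rx/tx pairs, so virtqueue index 2*N is
 * the rx queue and 2*N+1 the tx queue of queue pair N; vq2q() maps either
 * one back to its pair index (e.g. vq2q(4) == vq2q(5) == 2).
 */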

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    stw_p(&netcfg.status, n->status);
    stw_p(&netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}

static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if (!!n->vhost_started ==
        (virtio_net_started(n, status) && !nc->peer->link_down)) {
        return;
    }
    if (!n->vhost_started) {
        int r;
        if (!vhost_net_query(get_vhost_net(nc->peer), vdev)) {
            return;
        }
        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down) {
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    } else {
        n->status |= VIRTIO_NET_S_LINK_UP;
    }

    if (n->status != old_status) {
        virtio_notify_config(vdev);
    }

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path,
                                              &error_abort);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

static char *mac_strdup_printf(const uint8_t *mac)
{
    return g_strdup_printf("%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", mac[0],
                           mac[1], mac[2], mac[3], mac[4], mac[5]);
}

static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}
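
/*
 * Illustrative example (editor's note): n->vlans is a MAX_VLAN-bit bitmap
 * stored 32 bits per word, so VLAN id 1234 lives at word 1234 >> 5 == 38,
 * bit 1234 & 0x1f == 18, and get_vlan_table() above recovers it as
 * (38 << 5) + 18 == 1234.
 */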

static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    /* n->nobcast is set when broadcast is blocked, so invert it here */
    info->broadcast_allowed = !n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!((1 << VIRTIO_NET_F_CTRL_VLAN) & vdev->guest_features)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n)) {
        return 0;
    }

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(get_vhost_net(nc->peer), features);
}

static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)));

    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if ((1 << VIRTIO_NET_F_CTRL_VLAN) & features) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}
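
/*
 * Editor's note: per the virtio spec, a VIRTIO_NET_CTRL_MAC_TABLE_SET
 * payload carries two virtio_net_ctrl_mac tables back to back - unicast
 * addresses first, then multicast - each a 32-bit entry count followed by
 * that many 6-byte MACs, which is why the parsing above runs twice.
 */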

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = lduw_p(&vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD) {
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    } else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL) {
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = lduw_p(&mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov = elem.out_sg;
        iov_cnt = elem.out_num;
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}
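
/*
 * Editor's note: each control request is a descriptor chain with a
 * device-readable part (the class/cmd header plus a command-specific
 * payload) and a device-writable part of at least one byte, into which
 * the single VIRTIO_NET_OK/VIRTIO_NET_ERR ack above is written.
 */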

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

static void virtio_net_hdr_swap(struct virtio_net_hdr *hdr)
{
    tswap16s(&hdr->hdr_len);
    tswap16s(&hdr->gso_size);
    tswap16s(&hdr->csum_start);
    tswap16s(&hdr->csum_offset);
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}
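
/*
 * Editor's note: the hard-coded offsets above assume an untagged Ethernet
 * frame (ethertype at bytes 12-13) followed by a 20-byte IPv4 header with
 * no options, putting the IP protocol at byte 23 and the UDP source port
 * at bytes 34-35; a VLAN tag or IP options would shift these and make the
 * test miss, which is acceptable for a best-effort workaround.
 */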

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        virtio_net_hdr_swap(wbuf);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc) {
        return 1;
    }

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f)))) {
            return 0;
        }
    }

    if (ptr[0] & 1) { /* multicast */
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { /* unicast */
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size)) {
        return size;
    }

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            if (i == 0) {
                return -1;
            }
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len, vdev->guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
#if 0
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
#endif
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        stw_p(&mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}
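
/*
 * Editor's note: with mergeable rx buffers the final num_buffers count is
 * only known once the whole packet has been scattered, so the loop above
 * records (in mhdr_sg) where the num_buffers field of the first buffer's
 * header lives and patches it in just before virtqueue_flush().
 */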

static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

/* TX */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    assert(vdev->vm_running);

    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        if (n->has_vnet_hdr) {
            if (out_sg[0].iov_len < n->guest_hdr_len) {
                error_report("virtio-net header incorrect");
                exit(1);
            }
            virtio_net_hdr_swap((void *) out_sg[0].iov_base);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(vdev, q->tx_vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}

static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready anymore */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready anymore */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;

    for (i = 2; i <= n->max_queues * 2 + 1; i++) {
        virtio_del_queue(vdev, i);
    }

    for (i = 1; i < max; i++) {
        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
        if (n->vqs[i].tx_timer) {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[i]);
        } else {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
        }

        n->vqs[i].tx_waiting = 0;
        n->vqs[i].n = n;
    }

    /* Note: MINIX guests (version 3.2.1) use the ctrl vq but don't ack
     * VIRTIO_NET_F_CTRL_VQ.  Create the ctrl vq unconditionally to avoid
     * breaking them.
     */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);

    virtio_net_set_queues(n);
}
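
/*
 * Editor's note: after this function runs, the virtqueue layout is
 * rx0, tx0, rx1, tx1, ..., with the control queue always created last,
 * so the ctrl vq index moves when the number of queue pairs changes.
 */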

static void virtio_net_save(QEMUFile *f, void *opaque)
{
    int i;
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(vdev, f);

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        qemu_put_be64(f, n->curr_guest_offloads);
    }
}
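
/*
 * Editor's note: the load side below must consume these fields in exactly
 * the order and width they are written above; following the usual QEMU
 * savevm convention, new fields are only appended and gated on the section
 * version (VIRTIO_NET_VM_VERSION) or on negotiated features so that older
 * streams stay parseable.
 */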

static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int ret, i, link_down;

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION) {
        return -EINVAL;
    }

    ret = virtio_load(vdev, f, version_id);
    if (ret) {
        return ret;
    }

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));

    if (version_id >= 3) {
        n->status = qemu_get_be16(f);
    }

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else {
            int64_t i;

            /* Overflow detected - can happen if source has a larger MAC table.
             * We simply set overflow flag so there's no need to maintain the
             * table of addresses, discard them all.
             * Note: 64 bit math to avoid integer overflow.
             */
            for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
                qemu_get_byte(f);
            }
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6) {
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    }

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    if (n->max_queues > 1) {
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        if (n->curr_queues > n->max_queues) {
            error_report("virtio-net: curr_queues %x > max_queues %x",
                         n->curr_queues, n->max_queues);
            return -1;
        }
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        n->curr_guest_offloads = qemu_get_be64(f);
    } else {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    if (vdev->guest_features & (0x1 << VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        vdev->guest_features & (0x1 << VIRTIO_NET_F_CTRL_VQ)) {
        n->announce_counter = SELF_ANNOUNCE_ROUNDS;
        timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
    }

    return 0;
}

static void virtio_net_cleanup(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    n->nic = NULL;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
{
    int i, config_size = 0;
    host_features |= (1 << VIRTIO_NET_F_MAC);
    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}

void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, the netclient name will be type.x.
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}

static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
    n->curr_queues = 1;
    n->vqs[0].n = n;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_timer);
        n->vqs[0].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                          virtio_net_tx_timer,
                                          &n->vqs[0]);
    } else {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_bh);
        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
    }
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);

    add_boot_device_path(n->nic_conf.bootindex, dev, "/ethernet-phy@0");
}

static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(dev, "virtio-net", n);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        if (q->tx_timer) {
            timer_del(q->tx_timer);
            timer_free(q->tx_timer);
        } else if (q->tx_bh) {
            qemu_bh_delete(q->tx_bh);
        }
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
}

static Property virtio_net_properties[] = {
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)