hw/net/virtio-net.c
1 /*
2 * Virtio Network Device
4 * Copyright IBM, Corp. 2007
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
14 #include "qemu/osdep.h"
15 #include "qemu/atomic.h"
16 #include "qemu/iov.h"
17 #include "qemu/log.h"
18 #include "qemu/main-loop.h"
19 #include "qemu/module.h"
20 #include "hw/virtio/virtio.h"
21 #include "net/net.h"
22 #include "net/checksum.h"
23 #include "net/tap.h"
24 #include "qemu/error-report.h"
25 #include "qemu/timer.h"
26 #include "qemu/option.h"
27 #include "qemu/option_int.h"
28 #include "qemu/config-file.h"
29 #include "qapi/qmp/qdict.h"
30 #include "hw/virtio/virtio-net.h"
31 #include "net/vhost_net.h"
32 #include "net/announce.h"
33 #include "hw/virtio/virtio-bus.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-events-net.h"
36 #include "hw/qdev-properties.h"
37 #include "qapi/qapi-types-migration.h"
38 #include "qapi/qapi-events-migration.h"
39 #include "hw/virtio/virtio-access.h"
40 #include "migration/misc.h"
41 #include "standard-headers/linux/ethtool.h"
42 #include "sysemu/sysemu.h"
43 #include "trace.h"
44 #include "monitor/qdev.h"
45 #include "hw/pci/pci_device.h"
46 #include "net_rx_pkt.h"
47 #include "hw/virtio/vhost.h"
48 #include "sysemu/qtest.h"
50 #define VIRTIO_NET_VM_VERSION 11
52 #define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
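/*
 * The VLAN filter table (n->vlans) is a MAX_VLAN-bit (4096-bit) bitmap kept
 * in 32-bit words: MAX_VLAN >> 5 = 128 words, or MAX_VLAN >> 3 = 512 bytes.
 * VLAN id 'vid' maps to bit (vid & 0x1f) of word (vid >> 5); for example,
 * vid 100 lives in word 3, bit 4.
 */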
54 /* previously fixed value */
55 #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
56 #define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
58 /* for now, only allow larger queue_pairs; with virtio-1, guest can downsize */
59 #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
60 #define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
62 #define VIRTIO_NET_IP4_ADDR_SIZE 8 /* ipv4 saddr + daddr */
64 #define VIRTIO_NET_TCP_FLAG 0x3F
65 #define VIRTIO_NET_TCP_HDR_LENGTH 0xF000
67 /* IPv4 max payload, 16 bits in the header */
68 #define VIRTIO_NET_MAX_IP4_PAYLOAD (65535 - sizeof(struct ip_header))
69 #define VIRTIO_NET_MAX_TCP_PAYLOAD 65535
71 /* header length value in the IP header, without options */
72 #define VIRTIO_NET_IP4_HEADER_LENGTH 5
74 #define VIRTIO_NET_IP6_ADDR_SIZE 32 /* ipv6 saddr + daddr */
75 #define VIRTIO_NET_MAX_IP6_PAYLOAD VIRTIO_NET_MAX_TCP_PAYLOAD
77 /* Purge coalesced packets timer interval. This value affects performance
78 a lot and should be tuned carefully: '300000' (300us) is the recommended
79 value to pass the WHQL test, while '50000' can gain 2x netperf throughput
80 with tso/gso/gro 'off'. */
81 #define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000
83 #define VIRTIO_NET_RSS_SUPPORTED_HASHES (VIRTIO_NET_RSS_HASH_TYPE_IPv4 | \
84 VIRTIO_NET_RSS_HASH_TYPE_TCPv4 | \
85 VIRTIO_NET_RSS_HASH_TYPE_UDPv4 | \
86 VIRTIO_NET_RSS_HASH_TYPE_IPv6 | \
87 VIRTIO_NET_RSS_HASH_TYPE_TCPv6 | \
88 VIRTIO_NET_RSS_HASH_TYPE_UDPv6 | \
89 VIRTIO_NET_RSS_HASH_TYPE_IP_EX | \
90 VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | \
91 VIRTIO_NET_RSS_HASH_TYPE_UDP_EX)
93 static const VirtIOFeature feature_sizes[] = {
94 {.flags = 1ULL << VIRTIO_NET_F_MAC,
95 .end = endof(struct virtio_net_config, mac)},
96 {.flags = 1ULL << VIRTIO_NET_F_STATUS,
97 .end = endof(struct virtio_net_config, status)},
98 {.flags = 1ULL << VIRTIO_NET_F_MQ,
99 .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
100 {.flags = 1ULL << VIRTIO_NET_F_MTU,
101 .end = endof(struct virtio_net_config, mtu)},
102 {.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
103 .end = endof(struct virtio_net_config, duplex)},
104 {.flags = (1ULL << VIRTIO_NET_F_RSS) | (1ULL << VIRTIO_NET_F_HASH_REPORT),
105 .end = endof(struct virtio_net_config, supported_hash_types)},
109 static const VirtIOConfigSizeParams cfg_size_params = {
110 .min_size = endof(struct virtio_net_config, mac),
111 .max_size = sizeof(struct virtio_net_config),
112 .feature_sizes = feature_sizes
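/*
 * The feature_sizes table above, together with cfg_size_params, determines
 * how much of struct virtio_net_config is exposed to the guest (via
 * virtio_get_config_size()): the size starts at min_size (up to the mac
 * field) and grows to the largest .end whose feature bit is offered, capped
 * at sizeof(struct virtio_net_config). For example, offering
 * VIRTIO_NET_F_MQ but not VIRTIO_NET_F_STATUS still exposes the status
 * field, because max_virtqueue_pairs ends after status.
 */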
115 static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
117 VirtIONet *n = qemu_get_nic_opaque(nc);
119 return &n->vqs[nc->queue_index];
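/*
 * Virtqueues come in RX/TX pairs: virtqueue 2*i is the RX queue and
 * 2*i + 1 the TX queue of pair i, with the control queue (if any) last.
 * vq2q() below recovers the queue pair index by dividing by two,
 * e.g. vq2q(5) == 2.
 */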
122 static int vq2q(int queue_index)
124 return queue_index / 2;
127 static void flush_or_purge_queued_packets(NetClientState *nc)
129 if (!nc->peer) {
130 return;
133 qemu_flush_or_purge_queued_packets(nc->peer, true);
134 assert(!virtio_net_get_subqueue(nc)->async_tx.elem);
137 /* TODO
138 * - we could suppress RX interrupt if we were so inclined.
141 static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
143 VirtIONet *n = VIRTIO_NET(vdev);
144 struct virtio_net_config netcfg;
145 NetClientState *nc = qemu_get_queue(n->nic);
146 static const MACAddr zero = { .a = { 0, 0, 0, 0, 0, 0 } };
148 int ret = 0;
149 memset(&netcfg, 0 , sizeof(struct virtio_net_config));
150 virtio_stw_p(vdev, &netcfg.status, n->status);
151 virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queue_pairs);
152 virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
153 memcpy(netcfg.mac, n->mac, ETH_ALEN);
154 virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
155 netcfg.duplex = n->net_conf.duplex;
156 netcfg.rss_max_key_size = VIRTIO_NET_RSS_MAX_KEY_SIZE;
157 virtio_stw_p(vdev, &netcfg.rss_max_indirection_table_length,
158 virtio_host_has_feature(vdev, VIRTIO_NET_F_RSS) ?
159 VIRTIO_NET_RSS_MAX_TABLE_LEN : 1);
160 virtio_stl_p(vdev, &netcfg.supported_hash_types,
161 VIRTIO_NET_RSS_SUPPORTED_HASHES);
162 memcpy(config, &netcfg, n->config_size);
165 * Is this VDPA? No peer means not VDPA: there's no way to
166 * disconnect/reconnect a VDPA peer.
168 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
169 ret = vhost_net_get_config(get_vhost_net(nc->peer), (uint8_t *)&netcfg,
170 n->config_size);
171 if (ret == -1) {
172 return;
176 * Some NIC/kernel combinations present 0 as the mac address. As that
177 * is not a legal address, try to proceed with the address from the
178 * QEMU command line in the hope that the address has been configured
179 * correctly elsewhere - just not reported by the device.
181 if (memcmp(&netcfg.mac, &zero, sizeof(zero)) == 0) {
182 info_report("Zero hardware mac address detected. Ignoring.");
183 memcpy(netcfg.mac, n->mac, ETH_ALEN);
186 netcfg.status |= virtio_tswap16(vdev,
187 n->status & VIRTIO_NET_S_ANNOUNCE);
188 memcpy(config, &netcfg, n->config_size);
192 static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
194 VirtIONet *n = VIRTIO_NET(vdev);
195 struct virtio_net_config netcfg = {};
196 NetClientState *nc = qemu_get_queue(n->nic);
198 memcpy(&netcfg, config, n->config_size);
200 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
201 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
202 memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
203 memcpy(n->mac, netcfg.mac, ETH_ALEN);
204 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
208 * Is this VDPA? No peer means not VDPA: there's no way to
209 * disconnect/reconnect a VDPA peer.
211 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
212 vhost_net_set_config(get_vhost_net(nc->peer),
213 (uint8_t *)&netcfg, 0, n->config_size,
214 VHOST_SET_CONFIG_TYPE_MASTER);
218 static bool virtio_net_started(VirtIONet *n, uint8_t status)
220 VirtIODevice *vdev = VIRTIO_DEVICE(n);
221 return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
222 (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
225 static void virtio_net_announce_notify(VirtIONet *net)
227 VirtIODevice *vdev = VIRTIO_DEVICE(net);
228 trace_virtio_net_announce_notify();
230 net->status |= VIRTIO_NET_S_ANNOUNCE;
231 virtio_notify_config(vdev);
234 static void virtio_net_announce_timer(void *opaque)
236 VirtIONet *n = opaque;
237 trace_virtio_net_announce_timer(n->announce_timer.round);
239 n->announce_timer.round--;
240 virtio_net_announce_notify(n);
243 static void virtio_net_announce(NetClientState *nc)
245 VirtIONet *n = qemu_get_nic_opaque(nc);
246 VirtIODevice *vdev = VIRTIO_DEVICE(n);
249 * Make sure the virtio migration announcement timer isn't running
250 * If it is, let it trigger announcement so that we do not cause
251 * confusion.
253 if (n->announce_timer.round) {
254 return;
257 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
258 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
259 virtio_net_announce_notify(n);
263 static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
265 VirtIODevice *vdev = VIRTIO_DEVICE(n);
266 NetClientState *nc = qemu_get_queue(n->nic);
267 int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
268 int cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
269 n->max_ncs - n->max_queue_pairs : 0;
271 if (!get_vhost_net(nc->peer)) {
272 return;
275 if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
276 !!n->vhost_started) {
277 return;
279 if (!n->vhost_started) {
280 int r, i;
282 if (n->needs_vnet_hdr_swap) {
283 error_report("backend does not support %s vnet headers; "
284 "falling back on userspace virtio",
285 virtio_is_big_endian(vdev) ? "BE" : "LE");
286 return;
289 /* Any packets outstanding? Purge them to avoid touching rings
290 * when vhost is running.
292 for (i = 0; i < queue_pairs; i++) {
293 NetClientState *qnc = qemu_get_subqueue(n->nic, i);
295 /* Purge both directions: TX and RX. */
296 qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
297 qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
300 if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
301 r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
302 if (r < 0) {
303 error_report("%uBytes MTU not supported by the backend",
304 n->net_conf.mtu);
306 return;
310 n->vhost_started = 1;
311 r = vhost_net_start(vdev, n->nic->ncs, queue_pairs, cvq);
312 if (r < 0) {
313 error_report("unable to start vhost net: %d: "
314 "falling back on userspace virtio", -r);
315 n->vhost_started = 0;
317 } else {
318 vhost_net_stop(vdev, n->nic->ncs, queue_pairs, cvq);
319 n->vhost_started = 0;
323 static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
324 NetClientState *peer,
325 bool enable)
327 if (virtio_is_big_endian(vdev)) {
328 return qemu_set_vnet_be(peer, enable);
329 } else {
330 return qemu_set_vnet_le(peer, enable);
334 static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
335 int queue_pairs, bool enable)
337 int i;
339 for (i = 0; i < queue_pairs; i++) {
340 if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
341 enable) {
342 while (--i >= 0) {
343 virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
346 return true;
350 return false;
353 static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
355 VirtIODevice *vdev = VIRTIO_DEVICE(n);
356 int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
358 if (virtio_net_started(n, status)) {
359 /* Before using the device, we tell the network backend about the
360 * endianness to use when parsing vnet headers. If the backend
361 * can't do it, we fallback onto fixing the headers in the core
362 * virtio-net code.
364 n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
365 queue_pairs, true);
366 } else if (virtio_net_started(n, vdev->status)) {
367 /* After using the device, we need to reset the network backend to
368 * the default (guest native endianness), otherwise the guest may
369 * lose network connectivity if it is rebooted into a different
370 * endianness.
372 virtio_net_set_vnet_endian(vdev, n->nic->ncs, queue_pairs, false);
376 static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
378 unsigned int dropped = virtqueue_drop_all(vq);
379 if (dropped) {
380 virtio_notify(vdev, vq);
384 static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
386 VirtIONet *n = VIRTIO_NET(vdev);
387 VirtIONetQueue *q;
388 int i;
389 uint8_t queue_status;
391 virtio_net_vnet_endian_status(n, status);
392 virtio_net_vhost_status(n, status);
394 for (i = 0; i < n->max_queue_pairs; i++) {
395 NetClientState *ncs = qemu_get_subqueue(n->nic, i);
396 bool queue_started;
397 q = &n->vqs[i];
399 if ((!n->multiqueue && i != 0) || i >= n->curr_queue_pairs) {
400 queue_status = 0;
401 } else {
402 queue_status = status;
404 queue_started =
405 virtio_net_started(n, queue_status) && !n->vhost_started;
407 if (queue_started) {
408 qemu_flush_queued_packets(ncs);
411 if (!q->tx_waiting) {
412 continue;
415 if (queue_started) {
416 if (q->tx_timer) {
417 timer_mod(q->tx_timer,
418 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
419 } else {
420 qemu_bh_schedule(q->tx_bh);
422 } else {
423 if (q->tx_timer) {
424 timer_del(q->tx_timer);
425 } else {
426 qemu_bh_cancel(q->tx_bh);
428 if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
429 (queue_status & VIRTIO_CONFIG_S_DRIVER_OK) &&
430 vdev->vm_running) {
431 /* if tx is waiting, we likely have some packets in the tx queue
432 * and have disabled notification */
433 q->tx_waiting = 0;
434 virtio_queue_set_notification(q->tx_vq, 1);
435 virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
441 static void virtio_net_set_link_status(NetClientState *nc)
443 VirtIONet *n = qemu_get_nic_opaque(nc);
444 VirtIODevice *vdev = VIRTIO_DEVICE(n);
445 uint16_t old_status = n->status;
447 if (nc->link_down)
448 n->status &= ~VIRTIO_NET_S_LINK_UP;
449 else
450 n->status |= VIRTIO_NET_S_LINK_UP;
452 if (n->status != old_status)
453 virtio_notify_config(vdev);
455 virtio_net_set_status(vdev, vdev->status);
458 static void rxfilter_notify(NetClientState *nc)
460 VirtIONet *n = qemu_get_nic_opaque(nc);
462 if (nc->rxfilter_notify_enabled) {
463 char *path = object_get_canonical_path(OBJECT(n->qdev));
464 qapi_event_send_nic_rx_filter_changed(n->netclient_name, path);
465 g_free(path);
467 /* disable event notification to avoid event flooding */
468 nc->rxfilter_notify_enabled = 0;
472 static intList *get_vlan_table(VirtIONet *n)
474 intList *list;
475 int i, j;
477 list = NULL;
478 for (i = 0; i < MAX_VLAN >> 5; i++) {
479 for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
480 if (n->vlans[i] & (1U << j)) {
481 QAPI_LIST_PREPEND(list, (i << 5) + j);
486 return list;
489 static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
491 VirtIONet *n = qemu_get_nic_opaque(nc);
492 VirtIODevice *vdev = VIRTIO_DEVICE(n);
493 RxFilterInfo *info;
494 strList *str_list;
495 int i;
497 info = g_malloc0(sizeof(*info));
498 info->name = g_strdup(nc->name);
499 info->promiscuous = n->promisc;
501 if (n->nouni) {
502 info->unicast = RX_STATE_NONE;
503 } else if (n->alluni) {
504 info->unicast = RX_STATE_ALL;
505 } else {
506 info->unicast = RX_STATE_NORMAL;
509 if (n->nomulti) {
510 info->multicast = RX_STATE_NONE;
511 } else if (n->allmulti) {
512 info->multicast = RX_STATE_ALL;
513 } else {
514 info->multicast = RX_STATE_NORMAL;
517 info->broadcast_allowed = n->nobcast;
518 info->multicast_overflow = n->mac_table.multi_overflow;
519 info->unicast_overflow = n->mac_table.uni_overflow;
521 info->main_mac = qemu_mac_strdup_printf(n->mac);
523 str_list = NULL;
524 for (i = 0; i < n->mac_table.first_multi; i++) {
525 QAPI_LIST_PREPEND(str_list,
526 qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN));
528 info->unicast_table = str_list;
530 str_list = NULL;
531 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
532 QAPI_LIST_PREPEND(str_list,
533 qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN));
535 info->multicast_table = str_list;
536 info->vlan_table = get_vlan_table(n);
538 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
539 info->vlan = RX_STATE_ALL;
540 } else if (!info->vlan_table) {
541 info->vlan = RX_STATE_NONE;
542 } else {
543 info->vlan = RX_STATE_NORMAL;
546 /* enable event notification after query */
547 nc->rxfilter_notify_enabled = 1;
549 return info;
552 static void virtio_net_queue_reset(VirtIODevice *vdev, uint32_t queue_index)
554 VirtIONet *n = VIRTIO_NET(vdev);
555 NetClientState *nc;
557 /* validate queue_index and skip for cvq */
558 if (queue_index >= n->max_queue_pairs * 2) {
559 return;
562 nc = qemu_get_subqueue(n->nic, vq2q(queue_index));
564 if (!nc->peer) {
565 return;
568 if (get_vhost_net(nc->peer) &&
569 nc->peer->info->type == NET_CLIENT_DRIVER_TAP) {
570 vhost_net_virtqueue_reset(vdev, nc, queue_index);
573 flush_or_purge_queued_packets(nc);
576 static void virtio_net_queue_enable(VirtIODevice *vdev, uint32_t queue_index)
578 VirtIONet *n = VIRTIO_NET(vdev);
579 NetClientState *nc;
580 int r;
582 /* validate queue_index and skip for cvq */
583 if (queue_index >= n->max_queue_pairs * 2) {
584 return;
587 nc = qemu_get_subqueue(n->nic, vq2q(queue_index));
589 if (!nc->peer || !vdev->vhost_started) {
590 return;
593 if (get_vhost_net(nc->peer) &&
594 nc->peer->info->type == NET_CLIENT_DRIVER_TAP) {
595 r = vhost_net_virtqueue_restart(vdev, nc, queue_index);
596 if (r < 0) {
597 error_report("unable to restart vhost net virtqueue: %d, "
598 "when resetting the queue", queue_index);
603 static void virtio_net_reset(VirtIODevice *vdev)
605 VirtIONet *n = VIRTIO_NET(vdev);
606 int i;
608 /* Reset back to compatibility mode */
609 n->promisc = 1;
610 n->allmulti = 0;
611 n->alluni = 0;
612 n->nomulti = 0;
613 n->nouni = 0;
614 n->nobcast = 0;
615 /* multiqueue is disabled by default */
616 n->curr_queue_pairs = 1;
617 timer_del(n->announce_timer.tm);
618 n->announce_timer.round = 0;
619 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
621 /* Flush any MAC and VLAN filter table state */
622 n->mac_table.in_use = 0;
623 n->mac_table.first_multi = 0;
624 n->mac_table.multi_overflow = 0;
625 n->mac_table.uni_overflow = 0;
626 memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
627 memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
628 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
629 memset(n->vlans, 0, MAX_VLAN >> 3);
631 /* Flush any async TX */
632 for (i = 0; i < n->max_queue_pairs; i++) {
633 flush_or_purge_queued_packets(qemu_get_subqueue(n->nic, i));
637 static void peer_test_vnet_hdr(VirtIONet *n)
639 NetClientState *nc = qemu_get_queue(n->nic);
640 if (!nc->peer) {
641 return;
644 n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
647 static int peer_has_vnet_hdr(VirtIONet *n)
649 return n->has_vnet_hdr;
652 static int peer_has_ufo(VirtIONet *n)
654 if (!peer_has_vnet_hdr(n))
655 return 0;
657 n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
659 return n->has_ufo;
662 static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
663 int version_1, int hash_report)
665 int i;
666 NetClientState *nc;
668 n->mergeable_rx_bufs = mergeable_rx_bufs;
670 if (version_1) {
671 n->guest_hdr_len = hash_report ?
672 sizeof(struct virtio_net_hdr_v1_hash) :
673 sizeof(struct virtio_net_hdr_mrg_rxbuf);
674 n->rss_data.populate_hash = !!hash_report;
675 } else {
676 n->guest_hdr_len = n->mergeable_rx_bufs ?
677 sizeof(struct virtio_net_hdr_mrg_rxbuf) :
678 sizeof(struct virtio_net_hdr);
681 for (i = 0; i < n->max_queue_pairs; i++) {
682 nc = qemu_get_subqueue(n->nic, i);
684 if (peer_has_vnet_hdr(n) &&
685 qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
686 qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
687 n->host_hdr_len = n->guest_hdr_len;
692 static int virtio_net_max_tx_queue_size(VirtIONet *n)
694 NetClientState *peer = n->nic_conf.peers.ncs[0];
697 * Backends other than vhost-user or vhost-vdpa don't support max queue
698 * size.
700 if (!peer) {
701 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
704 switch(peer->info->type) {
705 case NET_CLIENT_DRIVER_VHOST_USER:
706 case NET_CLIENT_DRIVER_VHOST_VDPA:
707 return VIRTQUEUE_MAX_SIZE;
708 default:
709 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
713 static int peer_attach(VirtIONet *n, int index)
715 NetClientState *nc = qemu_get_subqueue(n->nic, index);
717 if (!nc->peer) {
718 return 0;
721 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
722 vhost_set_vring_enable(nc->peer, 1);
725 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
726 return 0;
729 if (n->max_queue_pairs == 1) {
730 return 0;
733 return tap_enable(nc->peer);
736 static int peer_detach(VirtIONet *n, int index)
738 NetClientState *nc = qemu_get_subqueue(n->nic, index);
740 if (!nc->peer) {
741 return 0;
744 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
745 vhost_set_vring_enable(nc->peer, 0);
748 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
749 return 0;
752 return tap_disable(nc->peer);
755 static void virtio_net_set_queue_pairs(VirtIONet *n)
757 int i;
758 int r;
760 if (n->nic->peer_deleted) {
761 return;
764 for (i = 0; i < n->max_queue_pairs; i++) {
765 if (i < n->curr_queue_pairs) {
766 r = peer_attach(n, i);
767 assert(!r);
768 } else {
769 r = peer_detach(n, i);
770 assert(!r);
775 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
777 static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
778 Error **errp)
780 VirtIONet *n = VIRTIO_NET(vdev);
781 NetClientState *nc = qemu_get_queue(n->nic);
783 /* First, sync all features that virtio-net could possibly support */
784 features |= n->host_features;
786 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
788 if (!peer_has_vnet_hdr(n)) {
789 virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
790 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
791 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
792 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
794 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
795 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
796 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
797 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
799 virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT);
802 if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
803 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
804 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
807 if (!get_vhost_net(nc->peer)) {
808 virtio_add_feature(&features, VIRTIO_F_RING_RESET);
809 return features;
812 if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
813 virtio_clear_feature(&features, VIRTIO_NET_F_RSS);
815 features = vhost_net_get_features(get_vhost_net(nc->peer), features);
816 vdev->backend_features = features;
818 if (n->mtu_bypass_backend &&
819 (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
820 features |= (1ULL << VIRTIO_NET_F_MTU);
824 * Since GUEST_ANNOUNCE is emulated, the feature bit could be set even
825 * when it is not enabled in the backend. This happens in the vDPA case.
827 * Make sure the feature set is not incoherent, as the driver could refuse
828 * to start.
830 * TODO: QEMU is able to emulate a CVQ just for guest_announce purposes,
831 * helping the guest notify its new location with vDPA devices that do not
832 * support it.
834 if (!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_CTRL_VQ)) {
835 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ANNOUNCE);
838 return features;
841 static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
843 uint64_t features = 0;
845 /* Linux kernel 2.6.25. It understood MAC (as everyone must),
846 * but also these: */
847 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
848 virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
849 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
850 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
851 virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
853 return features;
856 static void virtio_net_apply_guest_offloads(VirtIONet *n)
858 qemu_set_offload(qemu_get_queue(n->nic)->peer,
859 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
860 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
861 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
862 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
863 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
866 static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
868 static const uint64_t guest_offloads_mask =
869 (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
870 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
871 (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
872 (1ULL << VIRTIO_NET_F_GUEST_ECN) |
873 (1ULL << VIRTIO_NET_F_GUEST_UFO);
875 return guest_offloads_mask & features;
878 static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
880 VirtIODevice *vdev = VIRTIO_DEVICE(n);
881 return virtio_net_guest_offloads_by_features(vdev->guest_features);
884 typedef struct {
885 VirtIONet *n;
886 DeviceState *dev;
887 } FailoverDevice;
890 * Set the failover primary device
892 * @opaque: FailoverId to setup
893 * @opts: opts for device we are handling
894 * @errp: returns an error if this function fails
896 static int failover_set_primary(DeviceState *dev, void *opaque)
898 FailoverDevice *fdev = opaque;
899 PCIDevice *pci_dev = (PCIDevice *)
900 object_dynamic_cast(OBJECT(dev), TYPE_PCI_DEVICE);
902 if (!pci_dev) {
903 return 0;
906 if (!g_strcmp0(pci_dev->failover_pair_id, fdev->n->netclient_name)) {
907 fdev->dev = dev;
908 return 1;
911 return 0;
915 * Find the primary device for this failover virtio-net
917 * @n: VirtIONet device
918 * @errp: returns an error if this function fails
920 static DeviceState *failover_find_primary_device(VirtIONet *n)
922 FailoverDevice fdev = {
923 .n = n,
926 qbus_walk_children(sysbus_get_default(), failover_set_primary, NULL,
927 NULL, NULL, &fdev);
928 return fdev.dev;
931 static void failover_add_primary(VirtIONet *n, Error **errp)
933 Error *err = NULL;
934 DeviceState *dev = failover_find_primary_device(n);
936 if (dev) {
937 return;
940 if (!n->primary_opts) {
941 error_setg(errp, "Primary device not found");
942 error_append_hint(errp, "Virtio-net failover will not work. Make "
943 "sure primary device has parameter"
944 " failover_pair_id=%s\n", n->netclient_name);
945 return;
948 dev = qdev_device_add_from_qdict(n->primary_opts,
949 n->primary_opts_from_json,
950 &err);
951 if (err) {
952 qobject_unref(n->primary_opts);
953 n->primary_opts = NULL;
954 } else {
955 object_unref(OBJECT(dev));
957 error_propagate(errp, err);
960 static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
962 VirtIONet *n = VIRTIO_NET(vdev);
963 Error *err = NULL;
964 int i;
966 if (n->mtu_bypass_backend &&
967 !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
968 features &= ~(1ULL << VIRTIO_NET_F_MTU);
971 virtio_net_set_multiqueue(n,
972 virtio_has_feature(features, VIRTIO_NET_F_RSS) ||
973 virtio_has_feature(features, VIRTIO_NET_F_MQ));
975 virtio_net_set_mrg_rx_bufs(n,
976 virtio_has_feature(features,
977 VIRTIO_NET_F_MRG_RXBUF),
978 virtio_has_feature(features,
979 VIRTIO_F_VERSION_1),
980 virtio_has_feature(features,
981 VIRTIO_NET_F_HASH_REPORT));
983 n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
984 virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4);
985 n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
986 virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6);
987 n->rss_data.redirect = virtio_has_feature(features, VIRTIO_NET_F_RSS);
989 if (n->has_vnet_hdr) {
990 n->curr_guest_offloads =
991 virtio_net_guest_offloads_by_features(features);
992 virtio_net_apply_guest_offloads(n);
995 for (i = 0; i < n->max_queue_pairs; i++) {
996 NetClientState *nc = qemu_get_subqueue(n->nic, i);
998 if (!get_vhost_net(nc->peer)) {
999 continue;
1001 vhost_net_ack_features(get_vhost_net(nc->peer), features);
1004 * keep acked_features in NetVhostUserState up-to-date so it
1005 * can't miss any features configured by guest virtio driver.
1007 vhost_net_save_acked_features(nc->peer);
1010 if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
1011 memset(n->vlans, 0, MAX_VLAN >> 3);
1012 } else {
1013 memset(n->vlans, 0xff, MAX_VLAN >> 3);
1016 if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) {
1017 qapi_event_send_failover_negotiated(n->netclient_name);
1018 qatomic_set(&n->failover_primary_hidden, false);
1019 failover_add_primary(n, &err);
1020 if (err) {
1021 if (!qtest_enabled()) {
1022 warn_report_err(err);
1023 } else {
1024 error_free(err);
1030 static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
1031 struct iovec *iov, unsigned int iov_cnt)
1033 uint8_t on;
1034 size_t s;
1035 NetClientState *nc = qemu_get_queue(n->nic);
1037 s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
1038 if (s != sizeof(on)) {
1039 return VIRTIO_NET_ERR;
1042 if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
1043 n->promisc = on;
1044 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
1045 n->allmulti = on;
1046 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
1047 n->alluni = on;
1048 } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
1049 n->nomulti = on;
1050 } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
1051 n->nouni = on;
1052 } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
1053 n->nobcast = on;
1054 } else {
1055 return VIRTIO_NET_ERR;
1058 rxfilter_notify(nc);
1060 return VIRTIO_NET_OK;
1063 static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
1064 struct iovec *iov, unsigned int iov_cnt)
1066 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1067 uint64_t offloads;
1068 size_t s;
1070 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
1071 return VIRTIO_NET_ERR;
1074 s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
1075 if (s != sizeof(offloads)) {
1076 return VIRTIO_NET_ERR;
1079 if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
1080 uint64_t supported_offloads;
1082 offloads = virtio_ldq_p(vdev, &offloads);
1084 if (!n->has_vnet_hdr) {
1085 return VIRTIO_NET_ERR;
1088 n->rsc4_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
1089 virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO4);
1090 n->rsc6_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
1091 virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO6);
1092 virtio_clear_feature(&offloads, VIRTIO_NET_F_RSC_EXT);
1094 supported_offloads = virtio_net_supported_guest_offloads(n);
1095 if (offloads & ~supported_offloads) {
1096 return VIRTIO_NET_ERR;
1099 n->curr_guest_offloads = offloads;
1100 virtio_net_apply_guest_offloads(n);
1102 return VIRTIO_NET_OK;
1103 } else {
1104 return VIRTIO_NET_ERR;
1108 static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
1109 struct iovec *iov, unsigned int iov_cnt)
1111 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1112 struct virtio_net_ctrl_mac mac_data;
1113 size_t s;
1114 NetClientState *nc = qemu_get_queue(n->nic);
1116 if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
1117 if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
1118 return VIRTIO_NET_ERR;
1120 s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
1121 assert(s == sizeof(n->mac));
1122 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
1123 rxfilter_notify(nc);
1125 return VIRTIO_NET_OK;
1128 if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
1129 return VIRTIO_NET_ERR;
1132 int in_use = 0;
1133 int first_multi = 0;
1134 uint8_t uni_overflow = 0;
1135 uint8_t multi_overflow = 0;
1136 uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
1138 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
1139 sizeof(mac_data.entries));
1140 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
1141 if (s != sizeof(mac_data.entries)) {
1142 goto error;
1144 iov_discard_front(&iov, &iov_cnt, s);
1146 if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
1147 goto error;
1150 if (mac_data.entries <= MAC_TABLE_ENTRIES) {
1151 s = iov_to_buf(iov, iov_cnt, 0, macs,
1152 mac_data.entries * ETH_ALEN);
1153 if (s != mac_data.entries * ETH_ALEN) {
1154 goto error;
1156 in_use += mac_data.entries;
1157 } else {
1158 uni_overflow = 1;
1161 iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);
1163 first_multi = in_use;
1165 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
1166 sizeof(mac_data.entries));
1167 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
1168 if (s != sizeof(mac_data.entries)) {
1169 goto error;
1172 iov_discard_front(&iov, &iov_cnt, s);
1174 if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
1175 goto error;
1178 if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
1179 s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
1180 mac_data.entries * ETH_ALEN);
1181 if (s != mac_data.entries * ETH_ALEN) {
1182 goto error;
1184 in_use += mac_data.entries;
1185 } else {
1186 multi_overflow = 1;
1189 n->mac_table.in_use = in_use;
1190 n->mac_table.first_multi = first_multi;
1191 n->mac_table.uni_overflow = uni_overflow;
1192 n->mac_table.multi_overflow = multi_overflow;
1193 memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
1194 g_free(macs);
1195 rxfilter_notify(nc);
1197 return VIRTIO_NET_OK;
1199 error:
1200 g_free(macs);
1201 return VIRTIO_NET_ERR;
1204 static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
1205 struct iovec *iov, unsigned int iov_cnt)
1207 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1208 uint16_t vid;
1209 size_t s;
1210 NetClientState *nc = qemu_get_queue(n->nic);
1212 s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
1213 vid = virtio_lduw_p(vdev, &vid);
1214 if (s != sizeof(vid)) {
1215 return VIRTIO_NET_ERR;
1218 if (vid >= MAX_VLAN)
1219 return VIRTIO_NET_ERR;
1221 if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
1222 n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
1223 else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
1224 n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
1225 else
1226 return VIRTIO_NET_ERR;
1228 rxfilter_notify(nc);
1230 return VIRTIO_NET_OK;
1233 static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
1234 struct iovec *iov, unsigned int iov_cnt)
1236 trace_virtio_net_handle_announce(n->announce_timer.round);
1237 if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
1238 n->status & VIRTIO_NET_S_ANNOUNCE) {
1239 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
1240 if (n->announce_timer.round) {
1241 qemu_announce_timer_step(&n->announce_timer);
1243 return VIRTIO_NET_OK;
1244 } else {
1245 return VIRTIO_NET_ERR;
1249 static void virtio_net_detach_epbf_rss(VirtIONet *n);
1251 static void virtio_net_disable_rss(VirtIONet *n)
1253 if (n->rss_data.enabled) {
1254 trace_virtio_net_rss_disable();
1256 n->rss_data.enabled = false;
1258 virtio_net_detach_epbf_rss(n);
1261 static bool virtio_net_attach_ebpf_to_backend(NICState *nic, int prog_fd)
1263 NetClientState *nc = qemu_get_peer(qemu_get_queue(nic), 0);
1264 if (nc == NULL || nc->info->set_steering_ebpf == NULL) {
1265 return false;
1268 return nc->info->set_steering_ebpf(nc, prog_fd);
1271 static void rss_data_to_rss_config(struct VirtioNetRssData *data,
1272 struct EBPFRSSConfig *config)
1274 config->redirect = data->redirect;
1275 config->populate_hash = data->populate_hash;
1276 config->hash_types = data->hash_types;
1277 config->indirections_len = data->indirections_len;
1278 config->default_queue = data->default_queue;
1281 static bool virtio_net_attach_epbf_rss(VirtIONet *n)
1283 struct EBPFRSSConfig config = {};
1285 if (!ebpf_rss_is_loaded(&n->ebpf_rss)) {
1286 return false;
1289 rss_data_to_rss_config(&n->rss_data, &config);
1291 if (!ebpf_rss_set_all(&n->ebpf_rss, &config,
1292 n->rss_data.indirections_table, n->rss_data.key)) {
1293 return false;
1296 if (!virtio_net_attach_ebpf_to_backend(n->nic, n->ebpf_rss.program_fd)) {
1297 return false;
1300 return true;
1303 static void virtio_net_detach_epbf_rss(VirtIONet *n)
1305 virtio_net_attach_ebpf_to_backend(n->nic, -1);
1308 static bool virtio_net_load_ebpf(VirtIONet *n)
1310 if (!virtio_net_attach_ebpf_to_backend(n->nic, -1)) {
1311 /* backend doesn't support steering eBPF */
1312 return false;
1315 return ebpf_rss_load(&n->ebpf_rss);
1318 static void virtio_net_unload_ebpf(VirtIONet *n)
1320 virtio_net_attach_ebpf_to_backend(n->nic, -1);
1321 ebpf_rss_unload(&n->ebpf_rss);
1324 static uint16_t virtio_net_handle_rss(VirtIONet *n,
1325 struct iovec *iov,
1326 unsigned int iov_cnt,
1327 bool do_rss)
1329 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1330 struct virtio_net_rss_config cfg;
1331 size_t s, offset = 0, size_get;
1332 uint16_t queue_pairs, i;
1333 struct {
1334 uint16_t us;
1335 uint8_t b;
1336 } QEMU_PACKED temp;
1337 const char *err_msg = "";
1338 uint32_t err_value = 0;
1340 if (do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_RSS)) {
1341 err_msg = "RSS is not negotiated";
1342 goto error;
1344 if (!do_rss && !virtio_vdev_has_feature(vdev, VIRTIO_NET_F_HASH_REPORT)) {
1345 err_msg = "Hash report is not negotiated";
1346 goto error;
1348 size_get = offsetof(struct virtio_net_rss_config, indirection_table);
1349 s = iov_to_buf(iov, iov_cnt, offset, &cfg, size_get);
1350 if (s != size_get) {
1351 err_msg = "Short command buffer";
1352 err_value = (uint32_t)s;
1353 goto error;
1355 n->rss_data.hash_types = virtio_ldl_p(vdev, &cfg.hash_types);
1356 n->rss_data.indirections_len =
1357 virtio_lduw_p(vdev, &cfg.indirection_table_mask);
1358 n->rss_data.indirections_len++;
1359 if (!do_rss) {
1360 n->rss_data.indirections_len = 1;
1362 if (!is_power_of_2(n->rss_data.indirections_len)) {
1363 err_msg = "Invalid size of indirection table";
1364 err_value = n->rss_data.indirections_len;
1365 goto error;
1367 if (n->rss_data.indirections_len > VIRTIO_NET_RSS_MAX_TABLE_LEN) {
1368 err_msg = "Too large indirection table";
1369 err_value = n->rss_data.indirections_len;
1370 goto error;
1372 n->rss_data.default_queue = do_rss ?
1373 virtio_lduw_p(vdev, &cfg.unclassified_queue) : 0;
1374 if (n->rss_data.default_queue >= n->max_queue_pairs) {
1375 err_msg = "Invalid default queue";
1376 err_value = n->rss_data.default_queue;
1377 goto error;
1379 offset += size_get;
1380 size_get = sizeof(uint16_t) * n->rss_data.indirections_len;
1381 g_free(n->rss_data.indirections_table);
1382 n->rss_data.indirections_table = g_malloc(size_get);
1383 if (!n->rss_data.indirections_table) {
1384 err_msg = "Can't allocate indirections table";
1385 err_value = n->rss_data.indirections_len;
1386 goto error;
1388 s = iov_to_buf(iov, iov_cnt, offset,
1389 n->rss_data.indirections_table, size_get);
1390 if (s != size_get) {
1391 err_msg = "Short indirection table buffer";
1392 err_value = (uint32_t)s;
1393 goto error;
1395 for (i = 0; i < n->rss_data.indirections_len; ++i) {
1396 uint16_t val = n->rss_data.indirections_table[i];
1397 n->rss_data.indirections_table[i] = virtio_lduw_p(vdev, &val);
1399 offset += size_get;
1400 size_get = sizeof(temp);
1401 s = iov_to_buf(iov, iov_cnt, offset, &temp, size_get);
1402 if (s != size_get) {
1403 err_msg = "Can't get queue_pairs";
1404 err_value = (uint32_t)s;
1405 goto error;
1407 queue_pairs = do_rss ? virtio_lduw_p(vdev, &temp.us) : n->curr_queue_pairs;
1408 if (queue_pairs == 0 || queue_pairs > n->max_queue_pairs) {
1409 err_msg = "Invalid number of queue_pairs";
1410 err_value = queue_pairs;
1411 goto error;
1413 if (temp.b > VIRTIO_NET_RSS_MAX_KEY_SIZE) {
1414 err_msg = "Invalid key size";
1415 err_value = temp.b;
1416 goto error;
1418 if (!temp.b && n->rss_data.hash_types) {
1419 err_msg = "No key provided";
1420 err_value = 0;
1421 goto error;
1423 if (!temp.b && !n->rss_data.hash_types) {
1424 virtio_net_disable_rss(n);
1425 return queue_pairs;
1427 offset += size_get;
1428 size_get = temp.b;
1429 s = iov_to_buf(iov, iov_cnt, offset, n->rss_data.key, size_get);
1430 if (s != size_get) {
1431 err_msg = "Can get key buffer";
1432 err_value = (uint32_t)s;
1433 goto error;
1435 n->rss_data.enabled = true;
1437 if (!n->rss_data.populate_hash) {
1438 if (!virtio_net_attach_epbf_rss(n)) {
1439 /* EBPF must be loaded for vhost */
1440 if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
1441 warn_report("Can't load eBPF RSS for vhost");
1442 goto error;
1444 /* fallback to software RSS */
1445 warn_report("Can't load eBPF RSS - fallback to software RSS");
1446 n->rss_data.enabled_software_rss = true;
1448 } else {
1449 /* use software RSS for hash populating */
1450 /* and detach eBPF if it was loaded before */
1451 virtio_net_detach_epbf_rss(n);
1452 n->rss_data.enabled_software_rss = true;
1455 trace_virtio_net_rss_enable(n->rss_data.hash_types,
1456 n->rss_data.indirections_len,
1457 temp.b);
1458 return queue_pairs;
1459 error:
1460 trace_virtio_net_rss_error(err_msg, err_value);
1461 virtio_net_disable_rss(n);
1462 return 0;
1465 static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
1466 struct iovec *iov, unsigned int iov_cnt)
1468 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1469 uint16_t queue_pairs;
1470 NetClientState *nc = qemu_get_queue(n->nic);
1472 virtio_net_disable_rss(n);
1473 if (cmd == VIRTIO_NET_CTRL_MQ_HASH_CONFIG) {
1474 queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, false);
1475 return queue_pairs ? VIRTIO_NET_OK : VIRTIO_NET_ERR;
1477 if (cmd == VIRTIO_NET_CTRL_MQ_RSS_CONFIG) {
1478 queue_pairs = virtio_net_handle_rss(n, iov, iov_cnt, true);
1479 } else if (cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
1480 struct virtio_net_ctrl_mq mq;
1481 size_t s;
1482 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ)) {
1483 return VIRTIO_NET_ERR;
1485 s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
1486 if (s != sizeof(mq)) {
1487 return VIRTIO_NET_ERR;
1489 queue_pairs = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
1491 } else {
1492 return VIRTIO_NET_ERR;
1495 if (queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
1496 queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
1497 queue_pairs > n->max_queue_pairs ||
1498 !n->multiqueue) {
1499 return VIRTIO_NET_ERR;
1502 n->curr_queue_pairs = queue_pairs;
1503 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
1505 * Avoid updating the backend for a vdpa device: We're only interested
1506 * in updating the device model queues.
1508 return VIRTIO_NET_OK;
1510 /* stop the backend before changing the number of queue_pairs to avoid handling a
1511 * disabled queue */
1512 virtio_net_set_status(vdev, vdev->status);
1513 virtio_net_set_queue_pairs(n);
1515 return VIRTIO_NET_OK;
1518 size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev,
1519 const struct iovec *in_sg, unsigned in_num,
1520 const struct iovec *out_sg,
1521 unsigned out_num)
1523 VirtIONet *n = VIRTIO_NET(vdev);
1524 struct virtio_net_ctrl_hdr ctrl;
1525 virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
1526 size_t s;
1527 struct iovec *iov, *iov2;
1529 if (iov_size(in_sg, in_num) < sizeof(status) ||
1530 iov_size(out_sg, out_num) < sizeof(ctrl)) {
1531 virtio_error(vdev, "virtio-net ctrl missing headers");
1532 return 0;
1535 iov2 = iov = g_memdup2(out_sg, sizeof(struct iovec) * out_num);
1536 s = iov_to_buf(iov, out_num, 0, &ctrl, sizeof(ctrl));
1537 iov_discard_front(&iov, &out_num, sizeof(ctrl));
1538 if (s != sizeof(ctrl)) {
1539 status = VIRTIO_NET_ERR;
1540 } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
1541 status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, out_num);
1542 } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
1543 status = virtio_net_handle_mac(n, ctrl.cmd, iov, out_num);
1544 } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
1545 status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, out_num);
1546 } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
1547 status = virtio_net_handle_announce(n, ctrl.cmd, iov, out_num);
1548 } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
1549 status = virtio_net_handle_mq(n, ctrl.cmd, iov, out_num);
1550 } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
1551 status = virtio_net_handle_offloads(n, ctrl.cmd, iov, out_num);
1554 s = iov_from_buf(in_sg, in_num, 0, &status, sizeof(status));
1555 assert(s == sizeof(status));
1557 g_free(iov2);
1558 return sizeof(status);
1561 static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
1563 VirtQueueElement *elem;
1565 for (;;) {
1566 size_t written;
1567 elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
1568 if (!elem) {
1569 break;
1572 written = virtio_net_handle_ctrl_iov(vdev, elem->in_sg, elem->in_num,
1573 elem->out_sg, elem->out_num);
1574 if (written > 0) {
1575 virtqueue_push(vq, elem, written);
1576 virtio_notify(vdev, vq);
1577 g_free(elem);
1578 } else {
1579 virtqueue_detach_element(vq, elem, 0);
1580 g_free(elem);
1581 break;
1586 /* RX */
1588 static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
1590 VirtIONet *n = VIRTIO_NET(vdev);
1591 int queue_index = vq2q(virtio_get_queue_index(vq));
1593 qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
1596 static bool virtio_net_can_receive(NetClientState *nc)
1598 VirtIONet *n = qemu_get_nic_opaque(nc);
1599 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1600 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1602 if (!vdev->vm_running) {
1603 return false;
1606 if (nc->queue_index >= n->curr_queue_pairs) {
1607 return false;
1610 if (!virtio_queue_ready(q->rx_vq) ||
1611 !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1612 return false;
1615 return true;
1618 static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
1620 VirtIONet *n = q->n;
1621 if (virtio_queue_empty(q->rx_vq) ||
1622 (n->mergeable_rx_bufs &&
1623 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1624 virtio_queue_set_notification(q->rx_vq, 1);
1626 /* To avoid a race condition where the guest has made some buffers
1627 * available after the above check but before notification was
1628 * enabled, check for available buffers again.
1630 if (virtio_queue_empty(q->rx_vq) ||
1631 (n->mergeable_rx_bufs &&
1632 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1633 return 0;
1637 virtio_queue_set_notification(q->rx_vq, 0);
1638 return 1;
1641 static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
1643 virtio_tswap16s(vdev, &hdr->hdr_len);
1644 virtio_tswap16s(vdev, &hdr->gso_size);
1645 virtio_tswap16s(vdev, &hdr->csum_start);
1646 virtio_tswap16s(vdev, &hdr->csum_offset);
1649 /* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
1650 * it never finds out that the packets don't have valid checksums. This
1651 * causes dhclient to get upset. Fedora's carried a patch for ages to
1652 * fix this with Xen but it hasn't appeared in an upstream release of
1653 * dhclient yet.
1655 * To avoid breaking existing guests, we catch udp packets and add
1656 * checksums. This is terrible but it's better than hacking the guest
1657 * kernels.
1659 * N.B. if we introduce a zero-copy API, this operation is no longer free so
1660 * we should provide a mechanism to disable it to avoid polluting the host
1661 * cache.
1663 static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
1664 uint8_t *buf, size_t size)
1666 if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
1667 (size > 27 && size < 1500) && /* normal sized MTU */
1668 (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
1669 (buf[23] == 17) && /* ip.protocol == UDP */
1670 (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
1671 net_checksum_calculate(buf, size, CSUM_UDP);
1672 hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
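/*
 * The byte offsets used above assume an untagged Ethernet frame carrying an
 * IPv4 header with no options: the ethertype is at bytes 12-13, the IP
 * protocol field at byte 14 + 9 = 23, and the UDP source port at
 * byte 14 + 20 = 34.
 */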
1676 static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
1677 const void *buf, size_t size)
1679 if (n->has_vnet_hdr) {
1680 /* FIXME this cast is evil */
1681 void *wbuf = (void *)buf;
1682 work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
1683 size - n->host_hdr_len);
1685 if (n->needs_vnet_hdr_swap) {
1686 virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
1688 iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
1689 } else {
1690 struct virtio_net_hdr hdr = {
1691 .flags = 0,
1692 .gso_type = VIRTIO_NET_HDR_GSO_NONE
1694 iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
1698 static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
1700 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1701 static const uint8_t vlan[] = {0x81, 0x00};
1702 uint8_t *ptr = (uint8_t *)buf;
1703 int i;
1705 if (n->promisc)
1706 return 1;
1708 ptr += n->host_hdr_len;
1710 if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
1711 int vid = lduw_be_p(ptr + 14) & 0xfff;
1712 if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
1713 return 0;
1716 if (ptr[0] & 1) { // multicast
1717 if (!memcmp(ptr, bcast, sizeof(bcast))) {
1718 return !n->nobcast;
1719 } else if (n->nomulti) {
1720 return 0;
1721 } else if (n->allmulti || n->mac_table.multi_overflow) {
1722 return 1;
1725 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
1726 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1727 return 1;
1730 } else { // unicast
1731 if (n->nouni) {
1732 return 0;
1733 } else if (n->alluni || n->mac_table.uni_overflow) {
1734 return 1;
1735 } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
1736 return 1;
1739 for (i = 0; i < n->mac_table.first_multi; i++) {
1740 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1741 return 1;
1746 return 0;
1749 static uint8_t virtio_net_get_hash_type(bool hasip4,
1750 bool hasip6,
1751 EthL4HdrProto l4hdr_proto,
1752 uint32_t types)
1754 if (hasip4) {
1755 switch (l4hdr_proto) {
1756 case ETH_L4_HDR_PROTO_TCP:
1757 if (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
1758 return NetPktRssIpV4Tcp;
1760 break;
1762 case ETH_L4_HDR_PROTO_UDP:
1763 if (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
1764 return NetPktRssIpV4Udp;
1766 break;
1768 default:
1769 break;
1772 if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
1773 return NetPktRssIpV4;
1775 } else if (hasip6) {
1776 switch (l4hdr_proto) {
1777 case ETH_L4_HDR_PROTO_TCP:
1778 if (types & VIRTIO_NET_RSS_HASH_TYPE_TCP_EX) {
1779 return NetPktRssIpV6TcpEx;
1781 if (types & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
1782 return NetPktRssIpV6Tcp;
1784 break;
1786 case ETH_L4_HDR_PROTO_UDP:
1787 if (types & VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) {
1788 return NetPktRssIpV6UdpEx;
1790 if (types & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
1791 return NetPktRssIpV6Udp;
1793 break;
1795 default:
1796 break;
1799 if (types & VIRTIO_NET_RSS_HASH_TYPE_IP_EX) {
1800 return NetPktRssIpV6Ex;
1802 if (types & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
1803 return NetPktRssIpV6;
1806 return 0xff;
1809 static void virtio_set_packet_hash(const uint8_t *buf, uint8_t report,
1810 uint32_t hash)
1812 struct virtio_net_hdr_v1_hash *hdr = (void *)buf;
1813 hdr->hash_value = hash;
1814 hdr->hash_report = report;
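/*
 * RSS redirection in virtio_net_process_rss() below relies on the
 * indirection table length being a power of two (enforced when the table is
 * programmed), so "hash & (indirections_len - 1)" selects a slot without a
 * modulo; e.g. with a 128-entry table only the low 7 bits of the hash are
 * used.
 */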
1817 static int virtio_net_process_rss(NetClientState *nc, const uint8_t *buf,
1818 size_t size)
1820 VirtIONet *n = qemu_get_nic_opaque(nc);
1821 unsigned int index = nc->queue_index, new_index = index;
1822 struct NetRxPkt *pkt = n->rx_pkt;
1823 uint8_t net_hash_type;
1824 uint32_t hash;
1825 bool hasip4, hasip6;
1826 EthL4HdrProto l4hdr_proto;
1827 static const uint8_t reports[NetPktRssIpV6UdpEx + 1] = {
1828 VIRTIO_NET_HASH_REPORT_IPv4,
1829 VIRTIO_NET_HASH_REPORT_TCPv4,
1830 VIRTIO_NET_HASH_REPORT_TCPv6,
1831 VIRTIO_NET_HASH_REPORT_IPv6,
1832 VIRTIO_NET_HASH_REPORT_IPv6_EX,
1833 VIRTIO_NET_HASH_REPORT_TCPv6_EX,
1834 VIRTIO_NET_HASH_REPORT_UDPv4,
1835 VIRTIO_NET_HASH_REPORT_UDPv6,
1836 VIRTIO_NET_HASH_REPORT_UDPv6_EX
1839 net_rx_pkt_set_protocols(pkt, buf + n->host_hdr_len,
1840 size - n->host_hdr_len);
1841 net_rx_pkt_get_protocols(pkt, &hasip4, &hasip6, &l4hdr_proto);
1842 net_hash_type = virtio_net_get_hash_type(hasip4, hasip6, l4hdr_proto,
1843 n->rss_data.hash_types);
1844 if (net_hash_type > NetPktRssIpV6UdpEx) {
1845 if (n->rss_data.populate_hash) {
1846 virtio_set_packet_hash(buf, VIRTIO_NET_HASH_REPORT_NONE, 0);
1848 return n->rss_data.redirect ? n->rss_data.default_queue : -1;
1851 hash = net_rx_pkt_calc_rss_hash(pkt, net_hash_type, n->rss_data.key);
1853 if (n->rss_data.populate_hash) {
1854 virtio_set_packet_hash(buf, reports[net_hash_type], hash);
1857 if (n->rss_data.redirect) {
1858 new_index = hash & (n->rss_data.indirections_len - 1);
1859 new_index = n->rss_data.indirections_table[new_index];
1862 return (index == new_index) ? -1 : new_index;
1865 static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
1866 size_t size, bool no_rss)
1868 VirtIONet *n = qemu_get_nic_opaque(nc);
1869 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1870 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1871 VirtQueueElement *elems[VIRTQUEUE_MAX_SIZE];
1872 size_t lens[VIRTQUEUE_MAX_SIZE];
1873 struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
1874 struct virtio_net_hdr_mrg_rxbuf mhdr;
1875 unsigned mhdr_cnt = 0;
1876 size_t offset, i, guest_offset, j;
1877 ssize_t err;
1879 if (!virtio_net_can_receive(nc)) {
1880 return -1;
1883 if (!no_rss && n->rss_data.enabled && n->rss_data.enabled_software_rss) {
1884 int index = virtio_net_process_rss(nc, buf, size);
1885 if (index >= 0) {
1886 NetClientState *nc2 = qemu_get_subqueue(n->nic, index);
1887 return virtio_net_receive_rcu(nc2, buf, size, true);
1891 /* hdr_len refers to the header we supply to the guest */
1892 if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
1893 return 0;
1896 if (!receive_filter(n, buf, size))
1897 return size;
1899 offset = i = 0;
1901 while (offset < size) {
1902 VirtQueueElement *elem;
1903 int len, total;
1904 const struct iovec *sg;
1906 total = 0;
1908 if (i == VIRTQUEUE_MAX_SIZE) {
1909 virtio_error(vdev, "virtio-net unexpected long buffer chain");
1910 err = size;
1911 goto err;
1914 elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
1915 if (!elem) {
1916 if (i) {
1917 virtio_error(vdev, "virtio-net unexpected empty queue: "
1918 "i %zd mergeable %d offset %zd, size %zd, "
1919 "guest hdr len %zd, host hdr len %zd "
1920 "guest features 0x%" PRIx64,
1921 i, n->mergeable_rx_bufs, offset, size,
1922 n->guest_hdr_len, n->host_hdr_len,
1923 vdev->guest_features);
1925 err = -1;
1926 goto err;
1929 if (elem->in_num < 1) {
1930 virtio_error(vdev,
1931 "virtio-net receive queue contains no in buffers");
1932 virtqueue_detach_element(q->rx_vq, elem, 0);
1933 g_free(elem);
1934 err = -1;
1935 goto err;
1938 sg = elem->in_sg;
1939 if (i == 0) {
1940 assert(offset == 0);
1941 if (n->mergeable_rx_bufs) {
1942 mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
1943 sg, elem->in_num,
1944 offsetof(typeof(mhdr), num_buffers),
1945 sizeof(mhdr.num_buffers));
1948 receive_header(n, sg, elem->in_num, buf, size);
1949 if (n->rss_data.populate_hash) {
1950 offset = sizeof(mhdr);
1951 iov_from_buf(sg, elem->in_num, offset,
1952 buf + offset, n->host_hdr_len - sizeof(mhdr));
1954 offset = n->host_hdr_len;
1955 total += n->guest_hdr_len;
1956 guest_offset = n->guest_hdr_len;
1957 } else {
1958 guest_offset = 0;
1961 /* copy in packet. ugh */
1962 len = iov_from_buf(sg, elem->in_num, guest_offset,
1963 buf + offset, size - offset);
1964 total += len;
1965 offset += len;
1966 /* If buffers can't be merged, at this point we
1967 * must have consumed the complete packet.
1968 * Otherwise, drop it. */
1969 if (!n->mergeable_rx_bufs && offset < size) {
1970 virtqueue_unpop(q->rx_vq, elem, total);
1971 g_free(elem);
1972 err = size;
1973 goto err;
1976 elems[i] = elem;
1977 lens[i] = total;
1978 i++;
1981 if (mhdr_cnt) {
1982 virtio_stw_p(vdev, &mhdr.num_buffers, i);
1983 iov_from_buf(mhdr_sg, mhdr_cnt,
1985 &mhdr.num_buffers, sizeof mhdr.num_buffers);
1988 for (j = 0; j < i; j++) {
1989 /* signal other side */
1990 virtqueue_fill(q->rx_vq, elems[j], lens[j], j);
1991 g_free(elems[j]);
1994 virtqueue_flush(q->rx_vq, i);
1995 virtio_notify(vdev, q->rx_vq);
1997 return size;
1999 err:
2000 for (j = 0; j < i; j++) {
2001 virtqueue_detach_element(q->rx_vq, elems[j], lens[j]);
2002 g_free(elems[j]);
2005 return err;
2008 static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
2009 size_t size)
2011 RCU_READ_LOCK_GUARD();
2013 return virtio_net_receive_rcu(nc, buf, size, false);
2016 static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
2017 const uint8_t *buf,
2018 VirtioNetRscUnit *unit)
2020 uint16_t ip_hdrlen;
2021 struct ip_header *ip;
2023 ip = (struct ip_header *)(buf + chain->n->guest_hdr_len
2024 + sizeof(struct eth_header));
2025 unit->ip = (void *)ip;
2026 ip_hdrlen = (ip->ip_ver_len & 0xF) << 2;
2027 unit->ip_plen = &ip->ip_len;
2028 unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) + ip_hdrlen);
2029 unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
2030 unit->payload = htons(*unit->ip_plen) - ip_hdrlen - unit->tcp_hdrlen;
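/*
 * th_offset_flags carries the TCP data offset in its top 4 bits, counted in
 * 32-bit words. Extracting it with (flags & 0xF000) >> 12 and multiplying
 * by 4 to get bytes collapses into the single ">> 10" used here and in the
 * IPv6 variant below; e.g. a data offset of 5 words (0x5000) gives a
 * 20-byte TCP header.
 */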
2033 static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain,
2034 const uint8_t *buf,
2035 VirtioNetRscUnit *unit)
2037 struct ip6_header *ip6;
2039 ip6 = (struct ip6_header *)(buf + chain->n->guest_hdr_len
2040 + sizeof(struct eth_header));
2041 unit->ip = ip6;
2042 unit->ip_plen = &(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
2043 unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip)
2044 + sizeof(struct ip6_header));
2045 unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
2047 /* There is a difference between the payload length in IPv4 and IPv6:
2048 the IP header is excluded in IPv6 */
2049 unit->payload = htons(*unit->ip_plen) - unit->tcp_hdrlen;
2052 static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain,
2053 VirtioNetRscSeg *seg)
2055 int ret;
2056 struct virtio_net_hdr_v1 *h;
2058 h = (struct virtio_net_hdr_v1 *)seg->buf;
2059 h->flags = 0;
2060 h->gso_type = VIRTIO_NET_HDR_GSO_NONE;
2062 if (seg->is_coalesced) {
2063 h->rsc.segments = seg->packets;
2064 h->rsc.dup_acks = seg->dup_ack;
2065 h->flags = VIRTIO_NET_HDR_F_RSC_INFO;
2066 if (chain->proto == ETH_P_IP) {
2067 h->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2068 } else {
2069 h->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2073 ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
2074 QTAILQ_REMOVE(&chain->buffers, seg, next);
2075 g_free(seg->buf);
2076 g_free(seg);
2078 return ret;
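/*
 * When a coalesced segment is drained, the guest sees the result in the
 * virtio_net_hdr_v1 already stored at the front of seg->buf:
 * rsc.segments and rsc.dup_acks report what was merged, and
 * VIRTIO_NET_HDR_F_RSC_INFO together with a GSO type of TCPV4/TCPV6
 * marks the packet as an RSC aggregate.
 */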
2081 static void virtio_net_rsc_purge(void *opq)
2083 VirtioNetRscSeg *seg, *rn;
2084 VirtioNetRscChain *chain = (VirtioNetRscChain *)opq;
2086 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
2087 if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
2088 chain->stat.purge_failed++;
2089 continue;
2093 chain->stat.timer++;
2094 if (!QTAILQ_EMPTY(&chain->buffers)) {
2095 timer_mod(chain->drain_timer,
2096 qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
2100 static void virtio_net_rsc_cleanup(VirtIONet *n)
2102 VirtioNetRscChain *chain, *rn_chain;
2103 VirtioNetRscSeg *seg, *rn_seg;
2105 QTAILQ_FOREACH_SAFE(chain, &n->rsc_chains, next, rn_chain) {
2106 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn_seg) {
2107 QTAILQ_REMOVE(&chain->buffers, seg, next);
2108 g_free(seg->buf);
2109 g_free(seg);
2112 timer_free(chain->drain_timer);
2113 QTAILQ_REMOVE(&n->rsc_chains, chain, next);
2114 g_free(chain);
2118 static void virtio_net_rsc_cache_buf(VirtioNetRscChain *chain,
2119 NetClientState *nc,
2120 const uint8_t *buf, size_t size)
2122 uint16_t hdr_len;
2123 VirtioNetRscSeg *seg;
2125 hdr_len = chain->n->guest_hdr_len;
2126 seg = g_new(VirtioNetRscSeg, 1);
2127 seg->buf = g_malloc(hdr_len + sizeof(struct eth_header)
2128 + sizeof(struct ip6_header) + VIRTIO_NET_MAX_TCP_PAYLOAD);
2129 memcpy(seg->buf, buf, size);
2130 seg->size = size;
2131 seg->packets = 1;
2132 seg->dup_ack = 0;
2133 seg->is_coalesced = 0;
2134 seg->nc = nc;
2136 QTAILQ_INSERT_TAIL(&chain->buffers, seg, next);
2137 chain->stat.cache++;
2139 switch (chain->proto) {
2140 case ETH_P_IP:
2141 virtio_net_rsc_extract_unit4(chain, seg->buf, &seg->unit);
2142 break;
2143 case ETH_P_IPV6:
2144 virtio_net_rsc_extract_unit6(chain, seg->buf, &seg->unit);
2145 break;
2146 default:
2147 g_assert_not_reached();
2151 static int32_t virtio_net_rsc_handle_ack(VirtioNetRscChain *chain,
2152 VirtioNetRscSeg *seg,
2153 const uint8_t *buf,
2154 struct tcp_header *n_tcp,
2155 struct tcp_header *o_tcp)
2157 uint32_t nack, oack;
2158 uint16_t nwin, owin;
2160 nack = htonl(n_tcp->th_ack);
2161 nwin = htons(n_tcp->th_win);
2162 oack = htonl(o_tcp->th_ack);
2163 owin = htons(o_tcp->th_win);
2165 if ((nack - oack) >= VIRTIO_NET_MAX_TCP_PAYLOAD) {
2166 chain->stat.ack_out_of_win++;
2167 return RSC_FINAL;
2168 } else if (nack == oack) {
2169 /* duplicated ack or window probe */
2170 if (nwin == owin) {
2171 /* duplicated ack, add to the dup ack count as required by the whql test, up to 1 */
2172 chain->stat.dup_ack++;
2173 return RSC_FINAL;
2174 } else {
2175 /* Coalesce window update */
2176 o_tcp->th_win = n_tcp->th_win;
2177 chain->stat.win_update++;
2178 return RSC_COALESCE;
2180 } else {
2181 /* pure ack, go to 'C', finalize */
2182 chain->stat.pure_ack++;
2183 return RSC_FINAL;
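/*
 * Note on the window check above: nack and oack are unsigned, so an
 * acknowledgment that went backwards wraps around and (nack - oack)
 * exceeds VIRTIO_NET_MAX_TCP_PAYLOAD just like one that jumped forward
 * by more than 64KiB; both cases finalize the cached segment instead
 * of coalescing.
 */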
2187 static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain,
2188 VirtioNetRscSeg *seg,
2189 const uint8_t *buf,
2190 VirtioNetRscUnit *n_unit)
2192 void *data;
2193 uint16_t o_ip_len;
2194 uint32_t nseq, oseq;
2195 VirtioNetRscUnit *o_unit;
2197 o_unit = &seg->unit;
2198 o_ip_len = htons(*o_unit->ip_plen);
2199 nseq = htonl(n_unit->tcp->th_seq);
2200 oseq = htonl(o_unit->tcp->th_seq);
2202 /* out of order or retransmitted. */
2203 if ((nseq - oseq) > VIRTIO_NET_MAX_TCP_PAYLOAD) {
2204 chain->stat.data_out_of_win++;
2205 return RSC_FINAL;
2208 data = ((uint8_t *)n_unit->tcp) + n_unit->tcp_hdrlen;
2209 if (nseq == oseq) {
2210 if ((o_unit->payload == 0) && n_unit->payload) {
2211 /* From no payload to payload, the normal case, not a dup ack etc. */
2212 chain->stat.data_after_pure_ack++;
2213 goto coalesce;
2214 } else {
2215 return virtio_net_rsc_handle_ack(chain, seg, buf,
2216 n_unit->tcp, o_unit->tcp);
2218 } else if ((nseq - oseq) != o_unit->payload) {
2219 /* Not a consistent packet, out of order */
2220 chain->stat.data_out_of_order++;
2221 return RSC_FINAL;
2222 } else {
2223 coalesce:
2224 if ((o_ip_len + n_unit->payload) > chain->max_payload) {
2225 chain->stat.over_size++;
2226 return RSC_FINAL;
2229 /* The data is contiguous; the payload length field in v4/v6 is different,
2230 so use the field value to update and record the new data length */
2231 o_unit->payload += n_unit->payload; /* update new data len */
2233 /* update field in ip header */
2234 *o_unit->ip_plen = htons(o_ip_len + n_unit->payload);
2236 /* Bring the 'PUSH' bit along; the whql test guide says 'PUSH' can be coalesced
2237 for a windows guest, while this may change the behavior for a linux
2238 guest (only if it uses the RSC feature). */
2239 o_unit->tcp->th_offset_flags = n_unit->tcp->th_offset_flags;
2241 o_unit->tcp->th_ack = n_unit->tcp->th_ack;
2242 o_unit->tcp->th_win = n_unit->tcp->th_win;
2244 memmove(seg->buf + seg->size, data, n_unit->payload);
2245 seg->size += n_unit->payload;
2246 seg->packets++;
2247 chain->stat.coalesced++;
2248 return RSC_COALESCE;
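/*
 * The continuity test above, illustrated: the cached segment covers
 * [oseq, oseq + o_unit->payload), so a new segment is contiguous
 * exactly when nseq - oseq == o_unit->payload.  With oseq = 1000 and
 * 1460 bytes cached, nseq = 2460 is appended, while nseq = 3000 is
 * treated as out of order and finalizes the segment.
 */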
2252 static int32_t virtio_net_rsc_coalesce4(VirtioNetRscChain *chain,
2253 VirtioNetRscSeg *seg,
2254 const uint8_t *buf, size_t size,
2255 VirtioNetRscUnit *unit)
2257 struct ip_header *ip1, *ip2;
2259 ip1 = (struct ip_header *)(unit->ip);
2260 ip2 = (struct ip_header *)(seg->unit.ip);
2261 if ((ip1->ip_src ^ ip2->ip_src) || (ip1->ip_dst ^ ip2->ip_dst)
2262 || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
2263 || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
2264 chain->stat.no_match++;
2265 return RSC_NO_MATCH;
2268 return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
2271 static int32_t virtio_net_rsc_coalesce6(VirtioNetRscChain *chain,
2272 VirtioNetRscSeg *seg,
2273 const uint8_t *buf, size_t size,
2274 VirtioNetRscUnit *unit)
2276 struct ip6_header *ip1, *ip2;
2278 ip1 = (struct ip6_header *)(unit->ip);
2279 ip2 = (struct ip6_header *)(seg->unit.ip);
2280 if (memcmp(&ip1->ip6_src, &ip2->ip6_src, sizeof(struct in6_address))
2281 || memcmp(&ip1->ip6_dst, &ip2->ip6_dst, sizeof(struct in6_address))
2282 || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
2283 || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
2284 chain->stat.no_match++;
2285 return RSC_NO_MATCH;
2288 return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
2291 /* Packets with 'SYN' should bypass; other control flags should be sent after a drain
2292 * to prevent out-of-order delivery */
2293 static int virtio_net_rsc_tcp_ctrl_check(VirtioNetRscChain *chain,
2294 struct tcp_header *tcp)
2296 uint16_t tcp_hdr;
2297 uint16_t tcp_flag;
2299 tcp_flag = htons(tcp->th_offset_flags);
2300 tcp_hdr = (tcp_flag & VIRTIO_NET_TCP_HDR_LENGTH) >> 10;
2301 tcp_flag &= VIRTIO_NET_TCP_FLAG;
2302 if (tcp_flag & TH_SYN) {
2303 chain->stat.tcp_syn++;
2304 return RSC_BYPASS;
2307 if (tcp_flag & (TH_FIN | TH_URG | TH_RST | TH_ECE | TH_CWR)) {
2308 chain->stat.tcp_ctrl_drain++;
2309 return RSC_FINAL;
2312 if (tcp_hdr > sizeof(struct tcp_header)) {
2313 chain->stat.tcp_all_opt++;
2314 return RSC_FINAL;
2317 return RSC_CANDIDATE;
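/*
 * Flag handling above, in short: VIRTIO_NET_TCP_FLAG (0x3F) keeps the
 * six classic TCP flags (FIN/SYN/RST/PSH/ACK/URG) from the host-order
 * th_offset_flags.  SYN segments bypass coalescing entirely, FIN/URG/RST
 * segments drain the chain first, and segments carrying TCP options are
 * finalized as well.
 */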
2320 static size_t virtio_net_rsc_do_coalesce(VirtioNetRscChain *chain,
2321 NetClientState *nc,
2322 const uint8_t *buf, size_t size,
2323 VirtioNetRscUnit *unit)
2325 int ret;
2326 VirtioNetRscSeg *seg, *nseg;
2328 if (QTAILQ_EMPTY(&chain->buffers)) {
2329 chain->stat.empty_cache++;
2330 virtio_net_rsc_cache_buf(chain, nc, buf, size);
2331 timer_mod(chain->drain_timer,
2332 qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
2333 return size;
2336 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
2337 if (chain->proto == ETH_P_IP) {
2338 ret = virtio_net_rsc_coalesce4(chain, seg, buf, size, unit);
2339 } else {
2340 ret = virtio_net_rsc_coalesce6(chain, seg, buf, size, unit);
2343 if (ret == RSC_FINAL) {
2344 if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
2345 /* Send failed */
2346 chain->stat.final_failed++;
2347 return 0;
2350 /* Send current packet */
2351 return virtio_net_do_receive(nc, buf, size);
2352 } else if (ret == RSC_NO_MATCH) {
2353 continue;
2354 } else {
2355 /* Coalesced; set the coalesced flag so the checksum is recalculated for ipv4 */
2356 seg->is_coalesced = 1;
2357 return size;
2361 chain->stat.no_match_cache++;
2362 virtio_net_rsc_cache_buf(chain, nc, buf, size);
2363 return size;
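/*
 * Summary of the dispatch above: an empty chain caches the packet and
 * arms the drain timer; otherwise each cached segment is tried in turn
 * and the first verdict wins: RSC_FINAL flushes that segment and
 * forwards the new packet unmodified, RSC_COALESCE appends the payload,
 * and RSC_NO_MATCH on every segment caches the packet as a new flow.
 */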
2366 /* Drain a connection's data; this is to avoid out-of-order segments */
2367 static size_t virtio_net_rsc_drain_flow(VirtioNetRscChain *chain,
2368 NetClientState *nc,
2369 const uint8_t *buf, size_t size,
2370 uint16_t ip_start, uint16_t ip_size,
2371 uint16_t tcp_port)
2373 VirtioNetRscSeg *seg, *nseg;
2374 uint32_t ppair1, ppair2;
2376 ppair1 = *(uint32_t *)(buf + tcp_port);
2377 QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
2378 ppair2 = *(uint32_t *)(seg->buf + tcp_port);
2379 if (memcmp(buf + ip_start, seg->buf + ip_start, ip_size)
2380 || (ppair1 != ppair2)) {
2381 continue;
2383 if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
2384 chain->stat.drain_failed++;
2387 break;
2390 return virtio_net_do_receive(nc, buf, size);
2393 static int32_t virtio_net_rsc_sanity_check4(VirtioNetRscChain *chain,
2394 struct ip_header *ip,
2395 const uint8_t *buf, size_t size)
2397 uint16_t ip_len;
2399 /* Not an ipv4 packet */
2400 if (((ip->ip_ver_len & 0xF0) >> 4) != IP_HEADER_VERSION_4) {
2401 chain->stat.ip_option++;
2402 return RSC_BYPASS;
2405 /* Don't handle packets with ip option */
2406 if ((ip->ip_ver_len & 0xF) != VIRTIO_NET_IP4_HEADER_LENGTH) {
2407 chain->stat.ip_option++;
2408 return RSC_BYPASS;
2411 if (ip->ip_p != IPPROTO_TCP) {
2412 chain->stat.bypass_not_tcp++;
2413 return RSC_BYPASS;
2416 /* Don't handle packets with ip fragment */
2417 if (!(htons(ip->ip_off) & IP_DF)) {
2418 chain->stat.ip_frag++;
2419 return RSC_BYPASS;
2422 /* Don't handle packets with ecn flag */
2423 if (IPTOS_ECN(ip->ip_tos)) {
2424 chain->stat.ip_ecn++;
2425 return RSC_BYPASS;
2428 ip_len = htons(ip->ip_len);
2429 if (ip_len < (sizeof(struct ip_header) + sizeof(struct tcp_header))
2430 || ip_len > (size - chain->n->guest_hdr_len -
2431 sizeof(struct eth_header))) {
2432 chain->stat.ip_hacked++;
2433 return RSC_BYPASS;
2436 return RSC_CANDIDATE;
2439 static size_t virtio_net_rsc_receive4(VirtioNetRscChain *chain,
2440 NetClientState *nc,
2441 const uint8_t *buf, size_t size)
2443 int32_t ret;
2444 uint16_t hdr_len;
2445 VirtioNetRscUnit unit;
2447 hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
2449 if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header)
2450 + sizeof(struct tcp_header))) {
2451 chain->stat.bypass_not_tcp++;
2452 return virtio_net_do_receive(nc, buf, size);
2455 virtio_net_rsc_extract_unit4(chain, buf, &unit);
2456 if (virtio_net_rsc_sanity_check4(chain, unit.ip, buf, size)
2457 != RSC_CANDIDATE) {
2458 return virtio_net_do_receive(nc, buf, size);
2461 ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
2462 if (ret == RSC_BYPASS) {
2463 return virtio_net_do_receive(nc, buf, size);
2464 } else if (ret == RSC_FINAL) {
2465 return virtio_net_rsc_drain_flow(chain, nc, buf, size,
2466 ((hdr_len + sizeof(struct eth_header)) + 12),
2467 VIRTIO_NET_IP4_ADDR_SIZE,
2468 hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header));
2471 return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
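/*
 * The drain_flow() arguments above describe the IPv4 flow key by offset:
 * the source address starts 12 bytes into the IPv4 header, so ip_start
 * is hdr_len + sizeof(struct eth_header) + 12 and
 * VIRTIO_NET_IP4_ADDR_SIZE (8) spans saddr plus daddr, while tcp_port
 * points at the start of the TCP header, where the 4 bytes of source
 * and destination port sit.
 */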
2474 static int32_t virtio_net_rsc_sanity_check6(VirtioNetRscChain *chain,
2475 struct ip6_header *ip6,
2476 const uint8_t *buf, size_t size)
2478 uint16_t ip_len;
2480 if (((ip6->ip6_ctlun.ip6_un1.ip6_un1_flow & 0xF0) >> 4)
2481 != IP_HEADER_VERSION_6) {
2482 return RSC_BYPASS;
2485 /* Both option and protocol are checked by this */
2486 if (ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt != IPPROTO_TCP) {
2487 chain->stat.bypass_not_tcp++;
2488 return RSC_BYPASS;
2491 ip_len = htons(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
2492 if (ip_len < sizeof(struct tcp_header) ||
2493 ip_len > (size - chain->n->guest_hdr_len - sizeof(struct eth_header)
2494 - sizeof(struct ip6_header))) {
2495 chain->stat.ip_hacked++;
2496 return RSC_BYPASS;
2499 /* Don't handle packets with ecn flag */
2500 if (IP6_ECN(ip6->ip6_ctlun.ip6_un3.ip6_un3_ecn)) {
2501 chain->stat.ip_ecn++;
2502 return RSC_BYPASS;
2505 return RSC_CANDIDATE;
2508 static size_t virtio_net_rsc_receive6(void *opq, NetClientState *nc,
2509 const uint8_t *buf, size_t size)
2511 int32_t ret;
2512 uint16_t hdr_len;
2513 VirtioNetRscChain *chain;
2514 VirtioNetRscUnit unit;
2516 chain = opq;
2517 hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;
2519 if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header)
2520 + sizeof(tcp_header))) {
2521 return virtio_net_do_receive(nc, buf, size);
2524 virtio_net_rsc_extract_unit6(chain, buf, &unit);
2525 if (RSC_CANDIDATE != virtio_net_rsc_sanity_check6(chain,
2526 unit.ip, buf, size)) {
2527 return virtio_net_do_receive(nc, buf, size);
2530 ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
2531 if (ret == RSC_BYPASS) {
2532 return virtio_net_do_receive(nc, buf, size);
2533 } else if (ret == RSC_FINAL) {
2534 return virtio_net_rsc_drain_flow(chain, nc, buf, size,
2535 ((hdr_len + sizeof(struct eth_header)) + 8),
2536 VIRTIO_NET_IP6_ADDR_SIZE,
2537 hdr_len + sizeof(struct eth_header)
2538 + sizeof(struct ip6_header));
2541 return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
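/*
 * Same idea as the IPv4 case: ip6_src starts 8 bytes into the IPv6
 * header, VIRTIO_NET_IP6_ADDR_SIZE (32) spans both addresses, and the
 * port pair sits right after the fixed 40-byte IPv6 header, at the
 * start of the TCP header.
 */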
2544 static VirtioNetRscChain *virtio_net_rsc_lookup_chain(VirtIONet *n,
2545 NetClientState *nc,
2546 uint16_t proto)
2548 VirtioNetRscChain *chain;
2550 if ((proto != (uint16_t)ETH_P_IP) && (proto != (uint16_t)ETH_P_IPV6)) {
2551 return NULL;
2554 QTAILQ_FOREACH(chain, &n->rsc_chains, next) {
2555 if (chain->proto == proto) {
2556 return chain;
2560 chain = g_malloc(sizeof(*chain));
2561 chain->n = n;
2562 chain->proto = proto;
2563 if (proto == (uint16_t)ETH_P_IP) {
2564 chain->max_payload = VIRTIO_NET_MAX_IP4_PAYLOAD;
2565 chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
2566 } else {
2567 chain->max_payload = VIRTIO_NET_MAX_IP6_PAYLOAD;
2568 chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
2570 chain->drain_timer = timer_new_ns(QEMU_CLOCK_HOST,
2571 virtio_net_rsc_purge, chain);
2572 memset(&chain->stat, 0, sizeof(chain->stat));
2574 QTAILQ_INIT(&chain->buffers);
2575 QTAILQ_INSERT_TAIL(&n->rsc_chains, chain, next);
2577 return chain;
2580 static ssize_t virtio_net_rsc_receive(NetClientState *nc,
2581 const uint8_t *buf,
2582 size_t size)
2584 uint16_t proto;
2585 VirtioNetRscChain *chain;
2586 struct eth_header *eth;
2587 VirtIONet *n;
2589 n = qemu_get_nic_opaque(nc);
2590 if (size < (n->host_hdr_len + sizeof(struct eth_header))) {
2591 return virtio_net_do_receive(nc, buf, size);
2594 eth = (struct eth_header *)(buf + n->guest_hdr_len);
2595 proto = htons(eth->h_proto);
2597 chain = virtio_net_rsc_lookup_chain(n, nc, proto);
2598 if (chain) {
2599 chain->stat.received++;
2600 if (proto == (uint16_t)ETH_P_IP && n->rsc4_enabled) {
2601 return virtio_net_rsc_receive4(chain, nc, buf, size);
2602 } else if (proto == (uint16_t)ETH_P_IPV6 && n->rsc6_enabled) {
2603 return virtio_net_rsc_receive6(chain, nc, buf, size);
2606 return virtio_net_do_receive(nc, buf, size);
2609 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
2610 size_t size)
2612 VirtIONet *n = qemu_get_nic_opaque(nc);
2613 if ((n->rsc4_enabled || n->rsc6_enabled)) {
2614 return virtio_net_rsc_receive(nc, buf, size);
2615 } else {
2616 return virtio_net_do_receive(nc, buf, size);
2620 static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
2622 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
2624 VirtIONet *n = qemu_get_nic_opaque(nc);
2625 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
2626 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2627 int ret;
2629 virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
2630 virtio_notify(vdev, q->tx_vq);
2632 g_free(q->async_tx.elem);
2633 q->async_tx.elem = NULL;
2635 virtio_queue_set_notification(q->tx_vq, 1);
2636 ret = virtio_net_flush_tx(q);
2637 if (ret >= n->tx_burst) {
2639 * the flush has been stopped by tx_burst;
2640 * we will not receive a notification for the
2641 * remaining part, so re-schedule
2643 virtio_queue_set_notification(q->tx_vq, 0);
2644 if (q->tx_bh) {
2645 qemu_bh_schedule(q->tx_bh);
2646 } else {
2647 timer_mod(q->tx_timer,
2648 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2650 q->tx_waiting = 1;
2654 /* TX */
2655 static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
2657 VirtIONet *n = q->n;
2658 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2659 VirtQueueElement *elem;
2660 int32_t num_packets = 0;
2661 int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
2662 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
2663 return num_packets;
2666 if (q->async_tx.elem) {
2667 virtio_queue_set_notification(q->tx_vq, 0);
2668 return num_packets;
2671 for (;;) {
2672 ssize_t ret;
2673 unsigned int out_num;
2674 struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
2675 struct virtio_net_hdr_mrg_rxbuf mhdr;
2677 elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
2678 if (!elem) {
2679 break;
2682 out_num = elem->out_num;
2683 out_sg = elem->out_sg;
2684 if (out_num < 1) {
2685 virtio_error(vdev, "virtio-net header not in first element");
2686 virtqueue_detach_element(q->tx_vq, elem, 0);
2687 g_free(elem);
2688 return -EINVAL;
2691 if (n->has_vnet_hdr) {
2692 if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
2693 n->guest_hdr_len) {
2694 virtio_error(vdev, "virtio-net header incorrect");
2695 virtqueue_detach_element(q->tx_vq, elem, 0);
2696 g_free(elem);
2697 return -EINVAL;
2699 if (n->needs_vnet_hdr_swap) {
2700 virtio_net_hdr_swap(vdev, (void *) &mhdr);
2701 sg2[0].iov_base = &mhdr;
2702 sg2[0].iov_len = n->guest_hdr_len;
2703 out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
2704 out_sg, out_num,
2705 n->guest_hdr_len, -1);
2706 if (out_num == VIRTQUEUE_MAX_SIZE) {
2707 goto drop;
2709 out_num += 1;
2710 out_sg = sg2;
2714 * If host wants to see the guest header as is, we can
2715 * pass it on unchanged. Otherwise, copy just the parts
2716 * that host is interested in.
2718 assert(n->host_hdr_len <= n->guest_hdr_len);
2719 if (n->host_hdr_len != n->guest_hdr_len) {
2720 unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
2721 out_sg, out_num,
2722 0, n->host_hdr_len);
2723 sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
2724 out_sg, out_num,
2725 n->guest_hdr_len, -1);
2726 out_num = sg_num;
2727 out_sg = sg;
2730 ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
2731 out_sg, out_num, virtio_net_tx_complete);
2732 if (ret == 0) {
2733 virtio_queue_set_notification(q->tx_vq, 0);
2734 q->async_tx.elem = elem;
2735 return -EBUSY;
2738 drop:
2739 virtqueue_push(q->tx_vq, elem, 0);
2740 virtio_notify(vdev, q->tx_vq);
2741 g_free(elem);
2743 if (++num_packets >= n->tx_burst) {
2744 break;
2747 return num_packets;
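/*
 * Each descriptor chain is handed to qemu_sendv_packet_async(); a
 * return value of 0 means the backend queued the packet and will call
 * virtio_net_tx_complete() later, so the element is parked in
 * q->async_tx and flushing stops with -EBUSY until that completion.
 */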
2750 static void virtio_net_tx_timer(void *opaque);
2752 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
2754 VirtIONet *n = VIRTIO_NET(vdev);
2755 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
2757 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2758 virtio_net_drop_tx_queue_data(vdev, vq);
2759 return;
2762 /* This happens when device was stopped but VCPU wasn't. */
2763 if (!vdev->vm_running) {
2764 q->tx_waiting = 1;
2765 return;
2768 if (q->tx_waiting) {
2769 /* We already have queued packets, immediately flush */
2770 timer_del(q->tx_timer);
2771 virtio_net_tx_timer(q);
2772 } else {
2773 /* re-arm timer to flush it (and more) on next tick */
2774 timer_mod(q->tx_timer,
2775 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2776 q->tx_waiting = 1;
2777 virtio_queue_set_notification(vq, 0);
2781 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
2783 VirtIONet *n = VIRTIO_NET(vdev);
2784 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
2786 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
2787 virtio_net_drop_tx_queue_data(vdev, vq);
2788 return;
2791 if (unlikely(q->tx_waiting)) {
2792 return;
2794 q->tx_waiting = 1;
2795 /* This happens when device was stopped but VCPU wasn't. */
2796 if (!vdev->vm_running) {
2797 return;
2799 virtio_queue_set_notification(vq, 0);
2800 qemu_bh_schedule(q->tx_bh);
2803 static void virtio_net_tx_timer(void *opaque)
2805 VirtIONetQueue *q = opaque;
2806 VirtIONet *n = q->n;
2807 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2808 int ret;
2810 /* This happens when device was stopped but BH wasn't. */
2811 if (!vdev->vm_running) {
2812 /* Make sure tx waiting is set, so we'll run when restarted. */
2813 assert(q->tx_waiting);
2814 return;
2817 q->tx_waiting = 0;
2819 /* Just in case the driver is not ready any more */
2820 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
2821 return;
2824 ret = virtio_net_flush_tx(q);
2825 if (ret == -EBUSY || ret == -EINVAL) {
2826 return;
2829 * If we flush a full burst of packets, assume there are
2830 * more coming and immediately rearm
2832 if (ret >= n->tx_burst) {
2833 q->tx_waiting = 1;
2834 timer_mod(q->tx_timer,
2835 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2836 return;
2839 * If less than a full burst, re-enable notification and flush
2840 * anything that may have come in while we weren't looking. If
2841 * we find something, assume the guest is still active and rearm
2843 virtio_queue_set_notification(q->tx_vq, 1);
2844 ret = virtio_net_flush_tx(q);
2845 if (ret > 0) {
2846 virtio_queue_set_notification(q->tx_vq, 0);
2847 q->tx_waiting = 1;
2848 timer_mod(q->tx_timer,
2849 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
2853 static void virtio_net_tx_bh(void *opaque)
2855 VirtIONetQueue *q = opaque;
2856 VirtIONet *n = q->n;
2857 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2858 int32_t ret;
2860 /* This happens when device was stopped but BH wasn't. */
2861 if (!vdev->vm_running) {
2862 /* Make sure tx waiting is set, so we'll run when restarted. */
2863 assert(q->tx_waiting);
2864 return;
2867 q->tx_waiting = 0;
2869 /* Just in case the driver is not ready any more */
2870 if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
2871 return;
2874 ret = virtio_net_flush_tx(q);
2875 if (ret == -EBUSY || ret == -EINVAL) {
2876 return; /* Notification re-enable handled by tx_complete or device
2877 * broken */
2880 /* If we flush a full burst of packets, assume there are
2881 * more coming and immediately reschedule */
2882 if (ret >= n->tx_burst) {
2883 qemu_bh_schedule(q->tx_bh);
2884 q->tx_waiting = 1;
2885 return;
2888 /* If less than a full burst, re-enable notification and flush
2889 * anything that may have come in while we weren't looking. If
2890 * we find something, assume the guest is still active and reschedule */
2891 virtio_queue_set_notification(q->tx_vq, 1);
2892 ret = virtio_net_flush_tx(q);
2893 if (ret == -EINVAL) {
2894 return;
2895 } else if (ret > 0) {
2896 virtio_queue_set_notification(q->tx_vq, 0);
2897 qemu_bh_schedule(q->tx_bh);
2898 q->tx_waiting = 1;
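/*
 * The timer and bottom-half flush paths above follow the same
 * notification pattern: keep guest notifications disabled while a full
 * burst (or an async send) is still pending, and when less than a full
 * burst was flushed, re-enable notifications and flush once more to
 * catch buffers the guest queued in the meantime.
 */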
2902 static void virtio_net_add_queue(VirtIONet *n, int index)
2904 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2906 n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
2907 virtio_net_handle_rx);
2909 if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
2910 n->vqs[index].tx_vq =
2911 virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2912 virtio_net_handle_tx_timer);
2913 n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
2914 virtio_net_tx_timer,
2915 &n->vqs[index]);
2916 } else {
2917 n->vqs[index].tx_vq =
2918 virtio_add_queue(vdev, n->net_conf.tx_queue_size,
2919 virtio_net_handle_tx_bh);
2920 n->vqs[index].tx_bh = qemu_bh_new_guarded(virtio_net_tx_bh, &n->vqs[index],
2921 &DEVICE(vdev)->mem_reentrancy_guard);
2924 n->vqs[index].tx_waiting = 0;
2925 n->vqs[index].n = n;
2928 static void virtio_net_del_queue(VirtIONet *n, int index)
2930 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2931 VirtIONetQueue *q = &n->vqs[index];
2932 NetClientState *nc = qemu_get_subqueue(n->nic, index);
2934 qemu_purge_queued_packets(nc);
2936 virtio_del_queue(vdev, index * 2);
2937 if (q->tx_timer) {
2938 timer_free(q->tx_timer);
2939 q->tx_timer = NULL;
2940 } else {
2941 qemu_bh_delete(q->tx_bh);
2942 q->tx_bh = NULL;
2944 q->tx_waiting = 0;
2945 virtio_del_queue(vdev, index * 2 + 1);
2948 static void virtio_net_change_num_queue_pairs(VirtIONet *n, int new_max_queue_pairs)
2950 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2951 int old_num_queues = virtio_get_num_queues(vdev);
2952 int new_num_queues = new_max_queue_pairs * 2 + 1;
2953 int i;
2955 assert(old_num_queues >= 3);
2956 assert(old_num_queues % 2 == 1);
2958 if (old_num_queues == new_num_queues) {
2959 return;
2963 * We always need to remove and add ctrl vq if
2964 * old_num_queues != new_num_queues. Remove ctrl_vq first,
2965 * and then we only enter one of the following two loops.
2967 virtio_del_queue(vdev, old_num_queues - 1);
2969 for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
2970 /* new_num_queues < old_num_queues */
2971 virtio_net_del_queue(n, i / 2);
2974 for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
2975 /* new_num_queues > old_num_queues */
2976 virtio_net_add_queue(n, i / 2);
2979 /* add ctrl_vq last */
2980 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
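/*
 * Queue layout preserved by the code above: virtqueue 2 * i is the RX
 * queue and 2 * i + 1 the TX queue of pair i, with the control queue
 * always last.  Going from 1 to 2 queue pairs, for example, moves from
 * 3 queues (rx0, tx0, ctrl) to 5 (rx0, tx0, rx1, tx1, ctrl), which is
 * why the control queue is removed first and re-added at the end.
 */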
2983 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
2985 int max = multiqueue ? n->max_queue_pairs : 1;
2987 n->multiqueue = multiqueue;
2988 virtio_net_change_num_queue_pairs(n, max);
2990 virtio_net_set_queue_pairs(n);
2993 static int virtio_net_post_load_device(void *opaque, int version_id)
2995 VirtIONet *n = opaque;
2996 VirtIODevice *vdev = VIRTIO_DEVICE(n);
2997 int i, link_down;
2999 trace_virtio_net_post_load_device();
3000 virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
3001 virtio_vdev_has_feature(vdev,
3002 VIRTIO_F_VERSION_1),
3003 virtio_vdev_has_feature(vdev,
3004 VIRTIO_NET_F_HASH_REPORT));
3006 /* MAC_TABLE_ENTRIES may be different from the saved image */
3007 if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
3008 n->mac_table.in_use = 0;
3011 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
3012 n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
3016 * curr_guest_offloads will be later overwritten by the
3017 * virtio_set_features_nocheck call done from the virtio_load.
3018 * Here we make sure it is preserved and restored accordingly
3019 * in the virtio_net_post_load_virtio callback.
3021 n->saved_guest_offloads = n->curr_guest_offloads;
3023 virtio_net_set_queue_pairs(n);
3025 /* Find the first multicast entry in the saved MAC filter */
3026 for (i = 0; i < n->mac_table.in_use; i++) {
3027 if (n->mac_table.macs[i * ETH_ALEN] & 1) {
3028 break;
3031 n->mac_table.first_multi = i;
3033 /* nc.link_down can't be migrated, so infer link_down according
3034 * to link status bit in n->status */
3035 link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
3036 for (i = 0; i < n->max_queue_pairs; i++) {
3037 qemu_get_subqueue(n->nic, i)->link_down = link_down;
3040 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
3041 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
3042 qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
3043 QEMU_CLOCK_VIRTUAL,
3044 virtio_net_announce_timer, n);
3045 if (n->announce_timer.round) {
3046 timer_mod(n->announce_timer.tm,
3047 qemu_clock_get_ms(n->announce_timer.type));
3048 } else {
3049 qemu_announce_timer_del(&n->announce_timer, false);
3053 if (n->rss_data.enabled) {
3054 n->rss_data.enabled_software_rss = n->rss_data.populate_hash;
3055 if (!n->rss_data.populate_hash) {
3056 if (!virtio_net_attach_epbf_rss(n)) {
3057 if (get_vhost_net(qemu_get_queue(n->nic)->peer)) {
3058 warn_report("Can't post-load eBPF RSS for vhost");
3059 } else {
3060 warn_report("Can't post-load eBPF RSS - "
3061 "fallback to software RSS");
3062 n->rss_data.enabled_software_rss = true;
3067 trace_virtio_net_rss_enable(n->rss_data.hash_types,
3068 n->rss_data.indirections_len,
3069 sizeof(n->rss_data.key));
3070 } else {
3071 trace_virtio_net_rss_disable();
3073 return 0;
3076 static int virtio_net_post_load_virtio(VirtIODevice *vdev)
3078 VirtIONet *n = VIRTIO_NET(vdev);
3080 * The actual needed state is now in saved_guest_offloads,
3081 * see virtio_net_post_load_device for detail.
3082 * Restore it back and apply the desired offloads.
3084 n->curr_guest_offloads = n->saved_guest_offloads;
3085 if (peer_has_vnet_hdr(n)) {
3086 virtio_net_apply_guest_offloads(n);
3089 return 0;
3092 /* tx_waiting field of a VirtIONetQueue */
3093 static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
3094 .name = "virtio-net-queue-tx_waiting",
3095 .fields = (VMStateField[]) {
3096 VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
3097 VMSTATE_END_OF_LIST()
3101 static bool max_queue_pairs_gt_1(void *opaque, int version_id)
3103 return VIRTIO_NET(opaque)->max_queue_pairs > 1;
3106 static bool has_ctrl_guest_offloads(void *opaque, int version_id)
3108 return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
3109 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
3112 static bool mac_table_fits(void *opaque, int version_id)
3114 return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
3117 static bool mac_table_doesnt_fit(void *opaque, int version_id)
3119 return !mac_table_fits(opaque, version_id);
3122 /* This temporary type is shared by all the WITH_TMP methods
3123 * although only some fields are used by each.
3125 struct VirtIONetMigTmp {
3126 VirtIONet *parent;
3127 VirtIONetQueue *vqs_1;
3128 uint16_t curr_queue_pairs_1;
3129 uint8_t has_ufo;
3130 uint32_t has_vnet_hdr;
3133 /* The 2nd and subsequent tx_waiting flags are loaded later than
3134 * the 1st entry in the queue_pairs and only if there's more than one
3135 * entry. We use the tmp mechanism to calculate a temporary
3136 * pointer and count and also validate the count.
3139 static int virtio_net_tx_waiting_pre_save(void *opaque)
3141 struct VirtIONetMigTmp *tmp = opaque;
3143 tmp->vqs_1 = tmp->parent->vqs + 1;
3144 tmp->curr_queue_pairs_1 = tmp->parent->curr_queue_pairs - 1;
3145 if (tmp->parent->curr_queue_pairs == 0) {
3146 tmp->curr_queue_pairs_1 = 0;
3149 return 0;
3152 static int virtio_net_tx_waiting_pre_load(void *opaque)
3154 struct VirtIONetMigTmp *tmp = opaque;
3156 /* Reuse the pointer setup from save */
3157 virtio_net_tx_waiting_pre_save(opaque);
3159 if (tmp->parent->curr_queue_pairs > tmp->parent->max_queue_pairs) {
3160 error_report("virtio-net: curr_queue_pairs %x > max_queue_pairs %x",
3161 tmp->parent->curr_queue_pairs, tmp->parent->max_queue_pairs);
3163 return -EINVAL;
3166 return 0; /* all good */
3169 static const VMStateDescription vmstate_virtio_net_tx_waiting = {
3170 .name = "virtio-net-tx_waiting",
3171 .pre_load = virtio_net_tx_waiting_pre_load,
3172 .pre_save = virtio_net_tx_waiting_pre_save,
3173 .fields = (VMStateField[]) {
3174 VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
3175 curr_queue_pairs_1,
3176 vmstate_virtio_net_queue_tx_waiting,
3177 struct VirtIONetQueue),
3178 VMSTATE_END_OF_LIST()
3182 /* the 'has_ufo' flag is just tested; if the incoming stream has the
3183 * flag set we need to check that we have it
3185 static int virtio_net_ufo_post_load(void *opaque, int version_id)
3187 struct VirtIONetMigTmp *tmp = opaque;
3189 if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
3190 error_report("virtio-net: saved image requires TUN_F_UFO support");
3191 return -EINVAL;
3194 return 0;
3197 static int virtio_net_ufo_pre_save(void *opaque)
3199 struct VirtIONetMigTmp *tmp = opaque;
3201 tmp->has_ufo = tmp->parent->has_ufo;
3203 return 0;
3206 static const VMStateDescription vmstate_virtio_net_has_ufo = {
3207 .name = "virtio-net-ufo",
3208 .post_load = virtio_net_ufo_post_load,
3209 .pre_save = virtio_net_ufo_pre_save,
3210 .fields = (VMStateField[]) {
3211 VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
3212 VMSTATE_END_OF_LIST()
3216 /* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
3217 * flag set we need to check that we have it
3219 static int virtio_net_vnet_post_load(void *opaque, int version_id)
3221 struct VirtIONetMigTmp *tmp = opaque;
3223 if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
3224 error_report("virtio-net: saved image requires vnet_hdr=on");
3225 return -EINVAL;
3228 return 0;
3231 static int virtio_net_vnet_pre_save(void *opaque)
3233 struct VirtIONetMigTmp *tmp = opaque;
3235 tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
3237 return 0;
3240 static const VMStateDescription vmstate_virtio_net_has_vnet = {
3241 .name = "virtio-net-vnet",
3242 .post_load = virtio_net_vnet_post_load,
3243 .pre_save = virtio_net_vnet_pre_save,
3244 .fields = (VMStateField[]) {
3245 VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
3246 VMSTATE_END_OF_LIST()
3250 static bool virtio_net_rss_needed(void *opaque)
3252 return VIRTIO_NET(opaque)->rss_data.enabled;
3255 static const VMStateDescription vmstate_virtio_net_rss = {
3256 .name = "virtio-net-device/rss",
3257 .version_id = 1,
3258 .minimum_version_id = 1,
3259 .needed = virtio_net_rss_needed,
3260 .fields = (VMStateField[]) {
3261 VMSTATE_BOOL(rss_data.enabled, VirtIONet),
3262 VMSTATE_BOOL(rss_data.redirect, VirtIONet),
3263 VMSTATE_BOOL(rss_data.populate_hash, VirtIONet),
3264 VMSTATE_UINT32(rss_data.hash_types, VirtIONet),
3265 VMSTATE_UINT16(rss_data.indirections_len, VirtIONet),
3266 VMSTATE_UINT16(rss_data.default_queue, VirtIONet),
3267 VMSTATE_UINT8_ARRAY(rss_data.key, VirtIONet,
3268 VIRTIO_NET_RSS_MAX_KEY_SIZE),
3269 VMSTATE_VARRAY_UINT16_ALLOC(rss_data.indirections_table, VirtIONet,
3270 rss_data.indirections_len, 0,
3271 vmstate_info_uint16, uint16_t),
3272 VMSTATE_END_OF_LIST()
3276 static const VMStateDescription vmstate_virtio_net_device = {
3277 .name = "virtio-net-device",
3278 .version_id = VIRTIO_NET_VM_VERSION,
3279 .minimum_version_id = VIRTIO_NET_VM_VERSION,
3280 .post_load = virtio_net_post_load_device,
3281 .fields = (VMStateField[]) {
3282 VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
3283 VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
3284 vmstate_virtio_net_queue_tx_waiting,
3285 VirtIONetQueue),
3286 VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
3287 VMSTATE_UINT16(status, VirtIONet),
3288 VMSTATE_UINT8(promisc, VirtIONet),
3289 VMSTATE_UINT8(allmulti, VirtIONet),
3290 VMSTATE_UINT32(mac_table.in_use, VirtIONet),
3292 /* Guarded pair: If it fits we load it, else we throw it away
3293 * - can happen if the source has a larger MAC table; post-load
3294 * sets flags in this case.
3296 VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
3297 0, mac_table_fits, mac_table.in_use,
3298 ETH_ALEN),
3299 VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
3300 mac_table.in_use, ETH_ALEN),
3302 /* Note: This is an array of uint32's that's always been saved as a
3303 * buffer; hold onto your endiannesses; it's actually used as a bitmap
3304 * but based on the uint.
3306 VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
3307 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
3308 vmstate_virtio_net_has_vnet),
3309 VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
3310 VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
3311 VMSTATE_UINT8(alluni, VirtIONet),
3312 VMSTATE_UINT8(nomulti, VirtIONet),
3313 VMSTATE_UINT8(nouni, VirtIONet),
3314 VMSTATE_UINT8(nobcast, VirtIONet),
3315 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
3316 vmstate_virtio_net_has_ufo),
3317 VMSTATE_SINGLE_TEST(max_queue_pairs, VirtIONet, max_queue_pairs_gt_1, 0,
3318 vmstate_info_uint16_equal, uint16_t),
3319 VMSTATE_UINT16_TEST(curr_queue_pairs, VirtIONet, max_queue_pairs_gt_1),
3320 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
3321 vmstate_virtio_net_tx_waiting),
3322 VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
3323 has_ctrl_guest_offloads),
3324 VMSTATE_END_OF_LIST()
3326 .subsections = (const VMStateDescription * []) {
3327 &vmstate_virtio_net_rss,
3328 NULL
3332 static NetClientInfo net_virtio_info = {
3333 .type = NET_CLIENT_DRIVER_NIC,
3334 .size = sizeof(NICState),
3335 .can_receive = virtio_net_can_receive,
3336 .receive = virtio_net_receive,
3337 .link_status_changed = virtio_net_set_link_status,
3338 .query_rx_filter = virtio_net_query_rxfilter,
3339 .announce = virtio_net_announce,
3342 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
3344 VirtIONet *n = VIRTIO_NET(vdev);
3345 NetClientState *nc;
3346 assert(n->vhost_started);
3347 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) {
3348 /* Must guard against invalid features and a bogus queue index
3349 * being set by a malicious guest, or slipping through a
3350 * buggy migration stream.
3352 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
3353 qemu_log_mask(LOG_GUEST_ERROR,
3354 "%s: bogus vq index ignored\n", __func__);
3355 return false;
3357 nc = qemu_get_subqueue(n->nic, n->max_queue_pairs);
3358 } else {
3359 nc = qemu_get_subqueue(n->nic, vq2q(idx));
3362 * Add the check for the configure interrupt; VIRTIO_CONFIG_IRQ_IDX (-1)
3363 * is used as the macro for the configure interrupt's IDX. If this is not
3364 * supported, the function will return false
3367 if (idx == VIRTIO_CONFIG_IRQ_IDX) {
3368 return vhost_net_config_pending(get_vhost_net(nc->peer));
3370 return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
3373 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
3374 bool mask)
3376 VirtIONet *n = VIRTIO_NET(vdev);
3377 NetClientState *nc;
3378 assert(n->vhost_started);
3379 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_MQ) && idx == 2) {
3380 /* Must guard against invalid features and a bogus queue index
3381 * being set by a malicious guest, or slipping through a
3382 * buggy migration stream.
3384 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
3385 qemu_log_mask(LOG_GUEST_ERROR,
3386 "%s: bogus vq index ignored\n", __func__);
3387 return;
3389 nc = qemu_get_subqueue(n->nic, n->max_queue_pairs);
3390 } else {
3391 nc = qemu_get_subqueue(n->nic, vq2q(idx));
3394 * Add the check for the configure interrupt; VIRTIO_CONFIG_IRQ_IDX (-1)
3395 * is used as the macro for the configure interrupt's IDX. If this is not
3396 * supported, the function will return
3399 if (idx == VIRTIO_CONFIG_IRQ_IDX) {
3400 vhost_net_config_mask(get_vhost_net(nc->peer), vdev, mask);
3401 return;
3403 vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask);
3406 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
3408 virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
3410 n->config_size = virtio_get_config_size(&cfg_size_params, host_features);
3413 void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
3414 const char *type)
3417 * The name can be NULL; in that case the netclient name will be type.x.
3419 assert(type != NULL);
3421 g_free(n->netclient_name);
3422 g_free(n->netclient_type);
3423 n->netclient_name = g_strdup(name);
3424 n->netclient_type = g_strdup(type);
3427 static bool failover_unplug_primary(VirtIONet *n, DeviceState *dev)
3429 HotplugHandler *hotplug_ctrl;
3430 PCIDevice *pci_dev;
3431 Error *err = NULL;
3433 hotplug_ctrl = qdev_get_hotplug_handler(dev);
3434 if (hotplug_ctrl) {
3435 pci_dev = PCI_DEVICE(dev);
3436 pci_dev->partially_hotplugged = true;
3437 hotplug_handler_unplug_request(hotplug_ctrl, dev, &err);
3438 if (err) {
3439 error_report_err(err);
3440 return false;
3442 } else {
3443 return false;
3445 return true;
3448 static bool failover_replug_primary(VirtIONet *n, DeviceState *dev,
3449 Error **errp)
3451 Error *err = NULL;
3452 HotplugHandler *hotplug_ctrl;
3453 PCIDevice *pdev = PCI_DEVICE(dev);
3454 BusState *primary_bus;
3456 if (!pdev->partially_hotplugged) {
3457 return true;
3459 primary_bus = dev->parent_bus;
3460 if (!primary_bus) {
3461 error_setg(errp, "virtio_net: couldn't find primary bus");
3462 return false;
3464 qdev_set_parent_bus(dev, primary_bus, &error_abort);
3465 qatomic_set(&n->failover_primary_hidden, false);
3466 hotplug_ctrl = qdev_get_hotplug_handler(dev);
3467 if (hotplug_ctrl) {
3468 hotplug_handler_pre_plug(hotplug_ctrl, dev, &err);
3469 if (err) {
3470 goto out;
3472 hotplug_handler_plug(hotplug_ctrl, dev, &err);
3474 pdev->partially_hotplugged = false;
3476 out:
3477 error_propagate(errp, err);
3478 return !err;
3481 static void virtio_net_handle_migration_primary(VirtIONet *n, MigrationState *s)
3483 bool should_be_hidden;
3484 Error *err = NULL;
3485 DeviceState *dev = failover_find_primary_device(n);
3487 if (!dev) {
3488 return;
3491 should_be_hidden = qatomic_read(&n->failover_primary_hidden);
3493 if (migration_in_setup(s) && !should_be_hidden) {
3494 if (failover_unplug_primary(n, dev)) {
3495 vmstate_unregister(VMSTATE_IF(dev), qdev_get_vmsd(dev), dev);
3496 qapi_event_send_unplug_primary(dev->id);
3497 qatomic_set(&n->failover_primary_hidden, true);
3498 } else {
3499 warn_report("couldn't unplug primary device");
3501 } else if (migration_has_failed(s)) {
3502 /* We already unplugged the device, let's plug it back */
3503 if (!failover_replug_primary(n, dev, &err)) {
3504 if (err) {
3505 error_report_err(err);
3511 static void virtio_net_migration_state_notifier(Notifier *notifier, void *data)
3513 MigrationState *s = data;
3514 VirtIONet *n = container_of(notifier, VirtIONet, migration_state);
3515 virtio_net_handle_migration_primary(n, s);
3518 static bool failover_hide_primary_device(DeviceListener *listener,
3519 const QDict *device_opts,
3520 bool from_json,
3521 Error **errp)
3523 VirtIONet *n = container_of(listener, VirtIONet, primary_listener);
3524 const char *standby_id;
3526 if (!device_opts) {
3527 return false;
3530 if (!qdict_haskey(device_opts, "failover_pair_id")) {
3531 return false;
3534 if (!qdict_haskey(device_opts, "id")) {
3535 error_setg(errp, "Device with failover_pair_id needs to have id");
3536 return false;
3539 standby_id = qdict_get_str(device_opts, "failover_pair_id");
3540 if (g_strcmp0(standby_id, n->netclient_name) != 0) {
3541 return false;
3545 * The hide helper can be called several times for a given device.
3546 * Check there is only one primary for a virtio-net device but
3547 * don't duplicate the qdict several times if it's called for the same
3548 * device.
3550 if (n->primary_opts) {
3551 const char *old, *new;
3552 /* devices with failover_pair_id always have an id */
3553 old = qdict_get_str(n->primary_opts, "id");
3554 new = qdict_get_str(device_opts, "id");
3555 if (strcmp(old, new) != 0) {
3556 error_setg(errp, "Cannot attach more than one primary device to "
3557 "'%s': '%s' and '%s'", n->netclient_name, old, new);
3558 return false;
3560 } else {
3561 n->primary_opts = qdict_clone_shallow(device_opts);
3562 n->primary_opts_from_json = from_json;
3565 /* failover_primary_hidden is set during feature negotiation */
3566 return qatomic_read(&n->failover_primary_hidden);
3569 static void virtio_net_device_realize(DeviceState *dev, Error **errp)
3571 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3572 VirtIONet *n = VIRTIO_NET(dev);
3573 NetClientState *nc;
3574 int i;
3576 if (n->net_conf.mtu) {
3577 n->host_features |= (1ULL << VIRTIO_NET_F_MTU);
3580 if (n->net_conf.duplex_str) {
3581 if (strncmp(n->net_conf.duplex_str, "half", 5) == 0) {
3582 n->net_conf.duplex = DUPLEX_HALF;
3583 } else if (strncmp(n->net_conf.duplex_str, "full", 5) == 0) {
3584 n->net_conf.duplex = DUPLEX_FULL;
3585 } else {
3586 error_setg(errp, "'duplex' must be 'half' or 'full'");
3587 return;
3589 n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
3590 } else {
3591 n->net_conf.duplex = DUPLEX_UNKNOWN;
3594 if (n->net_conf.speed < SPEED_UNKNOWN) {
3595 error_setg(errp, "'speed' must be between 0 and INT_MAX");
3596 return;
3598 if (n->net_conf.speed >= 0) {
3599 n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
3602 if (n->failover) {
3603 n->primary_listener.hide_device = failover_hide_primary_device;
3604 qatomic_set(&n->failover_primary_hidden, true);
3605 device_listener_register(&n->primary_listener);
3606 n->migration_state.notify = virtio_net_migration_state_notifier;
3607 add_migration_state_change_notifier(&n->migration_state);
3608 n->host_features |= (1ULL << VIRTIO_NET_F_STANDBY);
3611 virtio_net_set_config_size(n, n->host_features);
3612 virtio_init(vdev, VIRTIO_ID_NET, n->config_size);
3615 * We set a lower limit on RX queue size to what it always was.
3616 * Guests that want a smaller ring can always resize it without
3617 * help from us (using virtio 1 and up).
3619 if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
3620 n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
3621 !is_power_of_2(n->net_conf.rx_queue_size)) {
3622 error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
3623 "must be a power of 2 between %d and %d.",
3624 n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
3625 VIRTQUEUE_MAX_SIZE);
3626 virtio_cleanup(vdev);
3627 return;
3630 if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
3631 n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
3632 !is_power_of_2(n->net_conf.tx_queue_size)) {
3633 error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
3634 "must be a power of 2 between %d and %d",
3635 n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
3636 VIRTQUEUE_MAX_SIZE);
3637 virtio_cleanup(vdev);
3638 return;
3641 n->max_ncs = MAX(n->nic_conf.peers.queues, 1);
3644 * Figure out the datapath queue pairs since the backend could
3645 * provide control queue via peers as well.
3647 if (n->nic_conf.peers.queues) {
3648 for (i = 0; i < n->max_ncs; i++) {
3649 if (n->nic_conf.peers.ncs[i]->is_datapath) {
3650 ++n->max_queue_pairs;
3654 n->max_queue_pairs = MAX(n->max_queue_pairs, 1);
3656 if (n->max_queue_pairs * 2 + 1 > VIRTIO_QUEUE_MAX) {
3657 error_setg(errp, "Invalid number of queue pairs (= %" PRIu32 "), "
3658 "must be a positive integer less than %d.",
3659 n->max_queue_pairs, (VIRTIO_QUEUE_MAX - 1) / 2);
3660 virtio_cleanup(vdev);
3661 return;
3663 n->vqs = g_new0(VirtIONetQueue, n->max_queue_pairs);
3664 n->curr_queue_pairs = 1;
3665 n->tx_timeout = n->net_conf.txtimer;
3667 if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
3668 && strcmp(n->net_conf.tx, "bh")) {
3669 warn_report("virtio-net: "
3670 "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
3671 n->net_conf.tx);
3672 error_printf("Defaulting to \"bh\"");
3675 n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
3676 n->net_conf.tx_queue_size);
3678 for (i = 0; i < n->max_queue_pairs; i++) {
3679 virtio_net_add_queue(n, i);
3682 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
3683 qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
3684 memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
3685 n->status = VIRTIO_NET_S_LINK_UP;
3686 qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
3687 QEMU_CLOCK_VIRTUAL,
3688 virtio_net_announce_timer, n);
3689 n->announce_timer.round = 0;
3691 if (n->netclient_type) {
3693 * Happens when virtio_net_set_netclient_name has been called.
3695 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
3696 n->netclient_type, n->netclient_name, n);
3697 } else {
3698 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
3699 object_get_typename(OBJECT(dev)), dev->id, n);
3702 for (i = 0; i < n->max_queue_pairs; i++) {
3703 n->nic->ncs[i].do_not_pad = true;
3706 peer_test_vnet_hdr(n);
3707 if (peer_has_vnet_hdr(n)) {
3708 for (i = 0; i < n->max_queue_pairs; i++) {
3709 qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
3711 n->host_hdr_len = sizeof(struct virtio_net_hdr);
3712 } else {
3713 n->host_hdr_len = 0;
3716 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);
3718 n->vqs[0].tx_waiting = 0;
3719 n->tx_burst = n->net_conf.txburst;
3720 virtio_net_set_mrg_rx_bufs(n, 0, 0, 0);
3721 n->promisc = 1; /* for compatibility */
3723 n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
3725 n->vlans = g_malloc0(MAX_VLAN >> 3);
3727 nc = qemu_get_queue(n->nic);
3728 nc->rxfilter_notify_enabled = 1;
3730 if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) {
3731 struct virtio_net_config netcfg = {};
3732 memcpy(&netcfg.mac, &n->nic_conf.macaddr, ETH_ALEN);
3733 vhost_net_set_config(get_vhost_net(nc->peer),
3734 (uint8_t *)&netcfg, 0, ETH_ALEN, VHOST_SET_CONFIG_TYPE_MASTER);
3736 QTAILQ_INIT(&n->rsc_chains);
3737 n->qdev = dev;
3739 net_rx_pkt_init(&n->rx_pkt);
3741 if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
3742 virtio_net_load_ebpf(n);
3746 static void virtio_net_device_unrealize(DeviceState *dev)
3748 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3749 VirtIONet *n = VIRTIO_NET(dev);
3750 int i, max_queue_pairs;
3752 if (virtio_has_feature(n->host_features, VIRTIO_NET_F_RSS)) {
3753 virtio_net_unload_ebpf(n);
3756 /* This will stop vhost backend if appropriate. */
3757 virtio_net_set_status(vdev, 0);
3759 g_free(n->netclient_name);
3760 n->netclient_name = NULL;
3761 g_free(n->netclient_type);
3762 n->netclient_type = NULL;
3764 g_free(n->mac_table.macs);
3765 g_free(n->vlans);
3767 if (n->failover) {
3768 qobject_unref(n->primary_opts);
3769 device_listener_unregister(&n->primary_listener);
3770 remove_migration_state_change_notifier(&n->migration_state);
3771 } else {
3772 assert(n->primary_opts == NULL);
3775 max_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
3776 for (i = 0; i < max_queue_pairs; i++) {
3777 virtio_net_del_queue(n, i);
3779 /* also delete the control vq */
3780 virtio_del_queue(vdev, max_queue_pairs * 2);
3781 qemu_announce_timer_del(&n->announce_timer, false);
3782 g_free(n->vqs);
3783 qemu_del_nic(n->nic);
3784 virtio_net_rsc_cleanup(n);
3785 g_free(n->rss_data.indirections_table);
3786 net_rx_pkt_uninit(n->rx_pkt);
3787 virtio_cleanup(vdev);
3790 static void virtio_net_instance_init(Object *obj)
3792 VirtIONet *n = VIRTIO_NET(obj);
3795 * The default config_size is sizeof(struct virtio_net_config).
3796 * Can be overridden with virtio_net_set_config_size.
3798 n->config_size = sizeof(struct virtio_net_config);
3799 device_add_bootindex_property(obj, &n->nic_conf.bootindex,
3800 "bootindex", "/ethernet-phy@0",
3801 DEVICE(n));
3803 ebpf_rss_init(&n->ebpf_rss);
3806 static int virtio_net_pre_save(void *opaque)
3808 VirtIONet *n = opaque;
3810 /* At this point, backend must be stopped, otherwise
3811 * it might keep writing to memory. */
3812 assert(!n->vhost_started);
3814 return 0;
3817 static bool primary_unplug_pending(void *opaque)
3819 DeviceState *dev = opaque;
3820 DeviceState *primary;
3821 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
3822 VirtIONet *n = VIRTIO_NET(vdev);
3824 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
3825 return false;
3827 primary = failover_find_primary_device(n);
3828 return primary ? primary->pending_deleted_event : false;
3831 static bool dev_unplug_pending(void *opaque)
3833 DeviceState *dev = opaque;
3834 VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(dev);
3836 return vdc->primary_unplug_pending(dev);
3839 static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev)
3841 VirtIONet *n = VIRTIO_NET(vdev);
3842 NetClientState *nc = qemu_get_queue(n->nic);
3843 struct vhost_net *net = get_vhost_net(nc->peer);
3844 return &net->dev;
3847 static const VMStateDescription vmstate_virtio_net = {
3848 .name = "virtio-net",
3849 .minimum_version_id = VIRTIO_NET_VM_VERSION,
3850 .version_id = VIRTIO_NET_VM_VERSION,
3851 .fields = (VMStateField[]) {
3852 VMSTATE_VIRTIO_DEVICE,
3853 VMSTATE_END_OF_LIST()
3855 .pre_save = virtio_net_pre_save,
3856 .dev_unplug_pending = dev_unplug_pending,
3859 static Property virtio_net_properties[] = {
3860 DEFINE_PROP_BIT64("csum", VirtIONet, host_features,
3861 VIRTIO_NET_F_CSUM, true),
3862 DEFINE_PROP_BIT64("guest_csum", VirtIONet, host_features,
3863 VIRTIO_NET_F_GUEST_CSUM, true),
3864 DEFINE_PROP_BIT64("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
3865 DEFINE_PROP_BIT64("guest_tso4", VirtIONet, host_features,
3866 VIRTIO_NET_F_GUEST_TSO4, true),
3867 DEFINE_PROP_BIT64("guest_tso6", VirtIONet, host_features,
3868 VIRTIO_NET_F_GUEST_TSO6, true),
3869 DEFINE_PROP_BIT64("guest_ecn", VirtIONet, host_features,
3870 VIRTIO_NET_F_GUEST_ECN, true),
3871 DEFINE_PROP_BIT64("guest_ufo", VirtIONet, host_features,
3872 VIRTIO_NET_F_GUEST_UFO, true),
3873 DEFINE_PROP_BIT64("guest_announce", VirtIONet, host_features,
3874 VIRTIO_NET_F_GUEST_ANNOUNCE, true),
3875 DEFINE_PROP_BIT64("host_tso4", VirtIONet, host_features,
3876 VIRTIO_NET_F_HOST_TSO4, true),
3877 DEFINE_PROP_BIT64("host_tso6", VirtIONet, host_features,
3878 VIRTIO_NET_F_HOST_TSO6, true),
3879 DEFINE_PROP_BIT64("host_ecn", VirtIONet, host_features,
3880 VIRTIO_NET_F_HOST_ECN, true),
3881 DEFINE_PROP_BIT64("host_ufo", VirtIONet, host_features,
3882 VIRTIO_NET_F_HOST_UFO, true),
3883 DEFINE_PROP_BIT64("mrg_rxbuf", VirtIONet, host_features,
3884 VIRTIO_NET_F_MRG_RXBUF, true),
3885 DEFINE_PROP_BIT64("status", VirtIONet, host_features,
3886 VIRTIO_NET_F_STATUS, true),
3887 DEFINE_PROP_BIT64("ctrl_vq", VirtIONet, host_features,
3888 VIRTIO_NET_F_CTRL_VQ, true),
3889 DEFINE_PROP_BIT64("ctrl_rx", VirtIONet, host_features,
3890 VIRTIO_NET_F_CTRL_RX, true),
3891 DEFINE_PROP_BIT64("ctrl_vlan", VirtIONet, host_features,
3892 VIRTIO_NET_F_CTRL_VLAN, true),
3893 DEFINE_PROP_BIT64("ctrl_rx_extra", VirtIONet, host_features,
3894 VIRTIO_NET_F_CTRL_RX_EXTRA, true),
3895 DEFINE_PROP_BIT64("ctrl_mac_addr", VirtIONet, host_features,
3896 VIRTIO_NET_F_CTRL_MAC_ADDR, true),
3897 DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features,
3898 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
3899 DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
3900 DEFINE_PROP_BIT64("rss", VirtIONet, host_features,
3901 VIRTIO_NET_F_RSS, false),
3902 DEFINE_PROP_BIT64("hash", VirtIONet, host_features,
3903 VIRTIO_NET_F_HASH_REPORT, false),
3904 DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features,
3905 VIRTIO_NET_F_RSC_EXT, false),
3906 DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout,
3907 VIRTIO_NET_RSC_DEFAULT_INTERVAL),
3908 DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
3909 DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
3910 TX_TIMER_INTERVAL),
3911 DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
3912 DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
3913 DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
3914 VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
3915 DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
3916 VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
3917 DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
3918 DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
3919 true),
3920 DEFINE_PROP_INT32("speed", VirtIONet, net_conf.speed, SPEED_UNKNOWN),
3921 DEFINE_PROP_STRING("duplex", VirtIONet, net_conf.duplex_str),
3922 DEFINE_PROP_BOOL("failover", VirtIONet, failover, false),
3923 DEFINE_PROP_END_OF_LIST(),
3926 static void virtio_net_class_init(ObjectClass *klass, void *data)
3928 DeviceClass *dc = DEVICE_CLASS(klass);
3929 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
3931 device_class_set_props(dc, virtio_net_properties);
3932 dc->vmsd = &vmstate_virtio_net;
3933 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
3934 vdc->realize = virtio_net_device_realize;
3935 vdc->unrealize = virtio_net_device_unrealize;
3936 vdc->get_config = virtio_net_get_config;
3937 vdc->set_config = virtio_net_set_config;
3938 vdc->get_features = virtio_net_get_features;
3939 vdc->set_features = virtio_net_set_features;
3940 vdc->bad_features = virtio_net_bad_features;
3941 vdc->reset = virtio_net_reset;
3942 vdc->queue_reset = virtio_net_queue_reset;
3943 vdc->queue_enable = virtio_net_queue_enable;
3944 vdc->set_status = virtio_net_set_status;
3945 vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
3946 vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
3947 vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
3948 vdc->post_load = virtio_net_post_load_virtio;
3949 vdc->vmsd = &vmstate_virtio_net_device;
3950 vdc->primary_unplug_pending = primary_unplug_pending;
3951 vdc->get_vhost = virtio_net_get_vhost;
3954 static const TypeInfo virtio_net_info = {
3955 .name = TYPE_VIRTIO_NET,
3956 .parent = TYPE_VIRTIO_DEVICE,
3957 .instance_size = sizeof(VirtIONet),
3958 .instance_init = virtio_net_instance_init,
3959 .class_init = virtio_net_class_init,
3962 static void virtio_register_types(void)
3964 type_register_static(&virtio_net_info);
3967 type_init(virtio_register_types)