/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/announce.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/error.h"
#include "qapi/qapi-events-net.h"
#include "hw/virtio/virtio-access.h"
#include "migration/misc.h"
#include "standard-headers/linux/ethtool.h"
#include "trace.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/* previously fixed value */
#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256

/* for now, only allow larger queues; with virtio-1, guest can downsize */
#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE

#define VIRTIO_NET_IP4_ADDR_SIZE   8        /* ipv4 saddr + daddr */

#define VIRTIO_NET_TCP_FLAG         0x3F
#define VIRTIO_NET_TCP_HDR_LENGTH   0xF000

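/* These two masks pick apart tcp_header.th_offset_flags (after htons):
 * the low 6 bits hold the classic TCP control flags (FIN/SYN/RST/PSH/ACK/URG)
 * and the top 4 bits hold the data offset, counted in 32-bit words. */
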
/* IPv4 max payload, 16 bits in the header */
#define VIRTIO_NET_MAX_IP4_PAYLOAD (65535 - sizeof(struct ip_header))
#define VIRTIO_NET_MAX_TCP_PAYLOAD 65535

/* header length value in ip header without option */
#define VIRTIO_NET_IP4_HEADER_LENGTH 5

#define VIRTIO_NET_IP6_ADDR_SIZE   32      /* ipv6 saddr + daddr */
#define VIRTIO_NET_MAX_IP6_PAYLOAD VIRTIO_NET_MAX_TCP_PAYLOAD

/* Purge coalesced packets timer interval.  This value affects the
   performance a lot and should be tuned carefully: '300000' (300us) is the
   recommended value to pass the WHQL test; '50000' can gain 2x netperf
   throughput with tso/gso/gro 'off'. */
#define VIRTIO_NET_RSC_DEFAULT_INTERVAL 300000

/* temporary until the standard header includes it */
#if !defined(VIRTIO_NET_HDR_F_RSC_INFO)

#define VIRTIO_NET_HDR_F_RSC_INFO  4 /* rsc_ext data in csum_ fields */
#define VIRTIO_NET_F_RSC_EXT       61

static inline __virtio16 *virtio_net_rsc_ext_num_packets(
    struct virtio_net_hdr *hdr)
{
    return &hdr->csum_start;
}

static inline __virtio16 *virtio_net_rsc_ext_num_dupacks(
    struct virtio_net_hdr *hdr)
{
    return &hdr->csum_offset;
}

#endif

static VirtIOFeature feature_sizes[] = {
    {.flags = 1ULL << VIRTIO_NET_F_MAC,
     .end = virtio_endof(struct virtio_net_config, mac)},
    {.flags = 1ULL << VIRTIO_NET_F_STATUS,
     .end = virtio_endof(struct virtio_net_config, status)},
    {.flags = 1ULL << VIRTIO_NET_F_MQ,
     .end = virtio_endof(struct virtio_net_config, max_virtqueue_pairs)},
    {.flags = 1ULL << VIRTIO_NET_F_MTU,
     .end = virtio_endof(struct virtio_net_config, mtu)},
    {.flags = 1ULL << VIRTIO_NET_F_SPEED_DUPLEX,
     .end = virtio_endof(struct virtio_net_config, duplex)},
    {}
};

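/* feature_sizes drives how much of struct virtio_net_config is exposed to
 * the guest: the config length is taken as the largest 'end' offset among
 * the enabled features (presumably consumed via
 * virtio_feature_get_config_size() when the device config size is set). */
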
static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

static int vq2q(int queue_index)
{
    return queue_index / 2;
}

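/* Virtqueues come in rx/tx pairs (rx0, tx0, rx1, tx1, ..., with the control
 * queue last), so dividing a data virtqueue's index by two yields its queue
 * pair index; vq2q() relies on that layout. */
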
/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
    virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    virtio_stl_p(vdev, &netcfg.speed, n->net_conf.speed);
    netcfg.duplex = n->net_conf.duplex;
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

static void virtio_net_announce_notify(VirtIONet *net)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(net);
    trace_virtio_net_announce_notify();

    net->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}

static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    trace_virtio_net_announce_timer(n->announce_timer.round);

    n->announce_timer.round--;
    virtio_net_announce_notify(n);
}

static void virtio_net_announce(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /*
     * Make sure the virtio migration announcement timer isn't running.
     * If it is, let it trigger announcement so that we do not cause
     * confusion.
     */
    if (n->announce_timer.round) {
        return;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        virtio_net_announce_notify(n);
    }
}

static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (n->needs_vnet_hdr_swap) {
            error_report("backend does not support %s vnet headers; "
                         "falling back on userspace virtio",
                         virtio_is_big_endian(vdev) ? "BE" : "LE");
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0; i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
            r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
            if (r < 0) {
                error_report("%uBytes MTU not supported by the backend",
                             n->net_conf.mtu);

                return;
            }
        }

        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
                                          NetClientState *peer,
                                          bool enable)
{
    if (virtio_is_big_endian(vdev)) {
        return qemu_set_vnet_be(peer, enable);
    } else {
        return qemu_set_vnet_le(peer, enable);
    }
}

static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
                                       int queues, bool enable)
{
    int i;

    for (i = 0; i < queues; i++) {
        if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
            enable) {
            while (--i >= 0) {
                virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
            }

            return true;
        }
    }

    return false;
}

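/* virtio_net_set_vnet_endian() returns true when at least one backend
 * rejected the requested endianness while enabling (after rolling back the
 * queues already set), which tells the caller that virtio-net itself must
 * byte-swap vnet headers (needs_vnet_hdr_swap). */
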
static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (virtio_net_started(n, status)) {
        /* Before using the device, we tell the network backend about the
         * endianness to use when parsing vnet headers. If the backend
         * can't do it, we fall back to fixing the headers in the core
         * virtio-net code.
         */
        n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
                                                            queues, true);
    } else if (virtio_net_started(n, vdev->status)) {
        /* After using the device, we need to reset the network backend to
         * the default (guest native endianness), otherwise the guest may
         * lose network connectivity if it is rebooted into a different
         * endianness.
         */
        virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
    }
}

static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
{
    unsigned int dropped = virtqueue_drop_all(vq);
    if (dropped) {
        virtio_notify(vdev, vq);
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vnet_endian_status(n, status);
    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
        bool queue_started;
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }
        queue_started =
            virtio_net_started(n, queue_status) && !n->vhost_started;

        if (queue_started) {
            qemu_flush_queued_packets(ncs);
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (queue_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
            if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
                (queue_status & VIRTIO_CONFIG_S_DRIVER_OK) &&
                vdev->vm_running) {
                /* if tx is waiting we likely have some packets in the
                 * tx queue and disabled notification */
                q->tx_waiting = 0;
                virtio_queue_set_notification(q->tx_vq, 1);
                virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down) {
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    } else {
        n->status |= VIRTIO_NET_S_LINK_UP;
    }

    if (n->status != old_status) {
        virtio_notify_config(vdev);
    }

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}

static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = qemu_mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer.tm);
    n->announce_timer.round = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);

    /* Flush any async TX */
    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (nc->peer) {
            qemu_flush_or_purge_queued_packets(nc->peer, true);
            assert(!virtio_net_get_subqueue(nc)->async_tx.elem);
        }
    }
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n)) {
        return 0;
    }

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
                                       int version_1)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    if (version_1) {
        n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
    } else {
        n->guest_hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);
    }

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int virtio_net_max_tx_queue_size(VirtIONet *n)
{
    NetClientState *peer = n->nic_conf.peers.ncs[0];

    /*
     * Backends other than vhost-user don't support max queue size.
     */
    if (!peer) {
        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }

    if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }

    return VIRTQUEUE_MAX_SIZE;
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 1);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    if (n->max_queues == 1) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 0);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    if (n->nic->peer_deleted) {
        return;
    }

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    /* First, sync all possible supported virtio-net features */
    features |= n->host_features;

    virtio_add_feature(&features, VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }

    features = vhost_net_get_features(get_vhost_net(nc->peer), features);
    vdev->backend_features = features;

    if (n->mtu_bypass_backend &&
            (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
        features |= (1ULL << VIRTIO_NET_F_MTU);
    }

    return features;
}

static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint64_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
    virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    if (n->mtu_bypass_backend &&
            !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
        features &= ~(1ULL << VIRTIO_NET_F_MTU);
    }

    virtio_net_set_multiqueue(n,
                              virtio_has_feature(features, VIRTIO_NET_F_MQ));

    virtio_net_set_mrg_rx_bufs(n,
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_MRG_RXBUF),
                               virtio_has_feature(features,
                                                  VIRTIO_F_VERSION_1));

    n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
        virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4);
    n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) &&
        virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6);

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        offloads = virtio_ldq_p(vdev, &offloads);

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        n->rsc4_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
            virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO4);
        n->rsc6_enabled = virtio_has_feature(offloads, VIRTIO_NET_F_RSC_EXT) &&
            virtio_has_feature(offloads, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&offloads, VIRTIO_NET_F_RSC_EXT);

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = virtio_lduw_p(vdev, &vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD) {
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    } else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL) {
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    trace_virtio_net_handle_announce(n->announce_timer.round);
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_timer.round) {
            qemu_announce_timer_step(&n->announce_timer);
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid
     * handling a disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement *elem;
    size_t s;
    struct iovec *iov, *iov2;
    unsigned int iov_cnt;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
            virtio_error(vdev, "virtio-net ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        iov_cnt = elem->out_num;
        iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, elem, sizeof(status));
        virtio_notify(vdev, vq);
        g_free(iov2);
        g_free(elem);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}

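/* Only the four 16-bit fields need swapping: flags and gso_type are single
 * bytes, and struct virtio_net_hdr contains no wider fields. */
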
/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

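/* The magic offsets above assume an untagged Ethernet frame: bytes 12-13
 * are the ethertype, byte 23 is the IPv4 protocol field (9 bytes into a
 * 20-byte IP header), and bytes 34-35 are the UDP source port. */
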
static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);

        if (n->needs_vnet_hdr_swap) {
            virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        }
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc) {
        return 1;
    }

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = lduw_be_p(ptr + 14) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f)))) {
            return 0;
        }
    }

    if (ptr[0] & 1) { /* multicast */
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { /* unicast */
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
                                      size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size)) {
        return size;
    }

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement *elem;
        int len, total;
        const struct iovec *sg;

        total = 0;

        elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            if (i) {
                virtio_error(vdev, "virtio-net unexpected empty queue: "
                             "i %zd mergeable %d offset %zd, size %zd, "
                             "guest hdr len %zd, host hdr len %zd "
                             "guest features 0x%" PRIx64,
                             i, n->mergeable_rx_bufs, offset, size,
                             n->guest_hdr_len, n->host_hdr_len,
                             vdev->guest_features);
            }
            return -1;
        }

        if (elem->in_num < 1) {
            virtio_error(vdev,
                         "virtio-net receive queue contains no in buffers");
            virtqueue_detach_element(q->rx_vq, elem, 0);
            g_free(elem);
            return -1;
        }

        sg = elem->in_sg;
        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem->in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem->in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem->in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
            virtqueue_unpop(q->rx_vq, elem, total);
            g_free(elem);
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, elem, total, i++);
        g_free(elem);
    }

    if (mhdr_cnt) {
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}

static ssize_t virtio_net_do_receive(NetClientState *nc, const uint8_t *buf,
                                     size_t size)
{
    ssize_t r;

    rcu_read_lock();
    r = virtio_net_receive_rcu(nc, buf, size);
    rcu_read_unlock();
    return r;
}

static void virtio_net_rsc_extract_unit4(VirtioNetRscChain *chain,
                                         const uint8_t *buf,
                                         VirtioNetRscUnit *unit)
{
    uint16_t ip_hdrlen;
    struct ip_header *ip;

    ip = (struct ip_header *)(buf + chain->n->guest_hdr_len
                              + sizeof(struct eth_header));
    unit->ip = (void *)ip;
    ip_hdrlen = (ip->ip_ver_len & 0xF) << 2;
    unit->ip_plen = &ip->ip_len;
    unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip) + ip_hdrlen);
    unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;
    unit->payload = htons(*unit->ip_plen) - ip_hdrlen - unit->tcp_hdrlen;
}

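/* The TCP data offset sits in bits 15:12 of th_offset_flags and counts
 * 32-bit words, so ((x >> 12) * 4), i.e. (x & 0xF000) >> 10, yields the
 * TCP header length in bytes. */
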
static void virtio_net_rsc_extract_unit6(VirtioNetRscChain *chain,
                                         const uint8_t *buf,
                                         VirtioNetRscUnit *unit)
{
    struct ip6_header *ip6;

    ip6 = (struct ip6_header *)(buf + chain->n->guest_hdr_len
                                + sizeof(struct eth_header));
    unit->ip = ip6;
    unit->ip_plen = &(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
    unit->tcp = (struct tcp_header *)(((uint8_t *)unit->ip)
                                      + sizeof(struct ip6_header));
    unit->tcp_hdrlen = (htons(unit->tcp->th_offset_flags) & 0xF000) >> 10;

    /* There is a difference between the payload length in ipv4 and v6:
       the ip header is excluded in ipv6 */
    unit->payload = htons(*unit->ip_plen) - unit->tcp_hdrlen;
}

static size_t virtio_net_rsc_drain_seg(VirtioNetRscChain *chain,
                                       VirtioNetRscSeg *seg)
{
    int ret;
    struct virtio_net_hdr *h;

    h = (struct virtio_net_hdr *)seg->buf;
    h->flags = 0;
    h->gso_type = VIRTIO_NET_HDR_GSO_NONE;

    if (seg->is_coalesced) {
        *virtio_net_rsc_ext_num_packets(h) = seg->packets;
        *virtio_net_rsc_ext_num_dupacks(h) = seg->dup_ack;
        h->flags = VIRTIO_NET_HDR_F_RSC_INFO;
        if (chain->proto == ETH_P_IP) {
            h->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
        } else {
            h->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
        }
    }

    ret = virtio_net_do_receive(seg->nc, seg->buf, seg->size);
    QTAILQ_REMOVE(&chain->buffers, seg, next);
    g_free(seg->buf);
    g_free(seg);

    return ret;
}

static void virtio_net_rsc_purge(void *opq)
{
    VirtioNetRscSeg *seg, *rn;
    VirtioNetRscChain *chain = (VirtioNetRscChain *)opq;

    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn) {
        if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
            chain->stat.purge_failed++;
            continue;
        }
    }

    chain->stat.timer++;
    if (!QTAILQ_EMPTY(&chain->buffers)) {
        timer_mod(chain->drain_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
    }
}

static void virtio_net_rsc_cleanup(VirtIONet *n)
{
    VirtioNetRscChain *chain, *rn_chain;
    VirtioNetRscSeg *seg, *rn_seg;

    QTAILQ_FOREACH_SAFE(chain, &n->rsc_chains, next, rn_chain) {
        QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, rn_seg) {
            QTAILQ_REMOVE(&chain->buffers, seg, next);
            g_free(seg->buf);
            g_free(seg);
        }

        timer_del(chain->drain_timer);
        timer_free(chain->drain_timer);
        QTAILQ_REMOVE(&n->rsc_chains, chain, next);
        g_free(chain);
    }
}

static void virtio_net_rsc_cache_buf(VirtioNetRscChain *chain,
                                     NetClientState *nc,
                                     const uint8_t *buf, size_t size)
{
    uint16_t hdr_len;
    VirtioNetRscSeg *seg;

    hdr_len = chain->n->guest_hdr_len;
    seg = g_malloc(sizeof(VirtioNetRscSeg));
    seg->buf = g_malloc(hdr_len + sizeof(struct eth_header)
        + sizeof(struct ip6_header) + VIRTIO_NET_MAX_TCP_PAYLOAD);
    memcpy(seg->buf, buf, size);
    seg->size = size;
    seg->packets = 1;
    seg->dup_ack = 0;
    seg->is_coalesced = 0;
    seg->nc = nc;

    QTAILQ_INSERT_TAIL(&chain->buffers, seg, next);
    chain->stat.cache++;

    switch (chain->proto) {
    case ETH_P_IP:
        virtio_net_rsc_extract_unit4(chain, seg->buf, &seg->unit);
        break;
    case ETH_P_IPV6:
        virtio_net_rsc_extract_unit6(chain, seg->buf, &seg->unit);
        break;
    default:
        g_assert_not_reached();
    }
}

static int32_t virtio_net_rsc_handle_ack(VirtioNetRscChain *chain,
                                         VirtioNetRscSeg *seg,
                                         const uint8_t *buf,
                                         struct tcp_header *n_tcp,
                                         struct tcp_header *o_tcp)
{
    uint32_t nack, oack;
    uint16_t nwin, owin;

    nack = htonl(n_tcp->th_ack);
    nwin = htons(n_tcp->th_win);
    oack = htonl(o_tcp->th_ack);
    owin = htons(o_tcp->th_win);

    if ((nack - oack) >= VIRTIO_NET_MAX_TCP_PAYLOAD) {
        chain->stat.ack_out_of_win++;
        return RSC_FINAL;
    } else if (nack == oack) {
        /* duplicated ack or window probe */
        if (nwin == owin) {
            /* duplicated ack; the WHQL test requires the dup_ack count,
               capped at 1 */
            chain->stat.dup_ack++;
            return RSC_FINAL;
        } else {
            /* Coalesce window update */
            o_tcp->th_win = n_tcp->th_win;
            chain->stat.win_update++;
            return RSC_COALESCE;
        }
    } else {
        /* pure ack, go to 'C', finalize */
        chain->stat.pure_ack++;
        return RSC_FINAL;
    }
}

static int32_t virtio_net_rsc_coalesce_data(VirtioNetRscChain *chain,
                                            VirtioNetRscSeg *seg,
                                            const uint8_t *buf,
                                            VirtioNetRscUnit *n_unit)
{
    void *data;
    uint16_t o_ip_len;
    uint32_t nseq, oseq;
    VirtioNetRscUnit *o_unit;

    o_unit = &seg->unit;
    o_ip_len = htons(*o_unit->ip_plen);
    nseq = htonl(n_unit->tcp->th_seq);
    oseq = htonl(o_unit->tcp->th_seq);

    /* out of order or retransmitted. */
    if ((nseq - oseq) > VIRTIO_NET_MAX_TCP_PAYLOAD) {
        chain->stat.data_out_of_win++;
        return RSC_FINAL;
    }

    data = ((uint8_t *)n_unit->tcp) + n_unit->tcp_hdrlen;
    if (nseq == oseq) {
        if ((o_unit->payload == 0) && n_unit->payload) {
            /* From no payload to payload, normal case, not a dup ack etc. */
            chain->stat.data_after_pure_ack++;
            goto coalesce;
        } else {
            return virtio_net_rsc_handle_ack(chain, seg, buf,
                                             n_unit->tcp, o_unit->tcp);
        }
    } else if ((nseq - oseq) != o_unit->payload) {
        /* Not a consistent packet, out of order */
        chain->stat.data_out_of_order++;
        return RSC_FINAL;
    } else {
coalesce:
        if ((o_ip_len + n_unit->payload) > chain->max_payload) {
            chain->stat.over_size++;
            return RSC_FINAL;
        }

        /* Here comes the right data; the payload length in v4/v6 differs,
           so use the field value to update and record the new data length */
        o_unit->payload += n_unit->payload; /* update new data len */

        /* update field in ip header */
        *o_unit->ip_plen = htons(o_ip_len + n_unit->payload);

        /* Take over the latest flags (including 'PSH'): the WHQL test guide
           says 'PSH' can be coalesced for a Windows guest, while this may
           change the behavior for a Linux guest (only if it uses the RSC
           feature). */
        o_unit->tcp->th_offset_flags = n_unit->tcp->th_offset_flags;

        o_unit->tcp->th_ack = n_unit->tcp->th_ack;
        o_unit->tcp->th_win = n_unit->tcp->th_win;

        memmove(seg->buf + seg->size, data, n_unit->payload);
        seg->size += n_unit->payload;
        seg->packets++;
        chain->stat.coalesced++;
        return RSC_COALESCE;
    }
}

static int32_t virtio_net_rsc_coalesce4(VirtioNetRscChain *chain,
                                        VirtioNetRscSeg *seg,
                                        const uint8_t *buf, size_t size,
                                        VirtioNetRscUnit *unit)
{
    struct ip_header *ip1, *ip2;

    ip1 = (struct ip_header *)(unit->ip);
    ip2 = (struct ip_header *)(seg->unit.ip);
    if ((ip1->ip_src ^ ip2->ip_src) || (ip1->ip_dst ^ ip2->ip_dst)
        || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
        || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
        chain->stat.no_match++;
        return RSC_NO_MATCH;
    }

    return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
}

static int32_t virtio_net_rsc_coalesce6(VirtioNetRscChain *chain,
                                        VirtioNetRscSeg *seg,
                                        const uint8_t *buf, size_t size,
                                        VirtioNetRscUnit *unit)
{
    struct ip6_header *ip1, *ip2;

    ip1 = (struct ip6_header *)(unit->ip);
    ip2 = (struct ip6_header *)(seg->unit.ip);
    if (memcmp(&ip1->ip6_src, &ip2->ip6_src, sizeof(struct in6_address))
        || memcmp(&ip1->ip6_dst, &ip2->ip6_dst, sizeof(struct in6_address))
        || (unit->tcp->th_sport ^ seg->unit.tcp->th_sport)
        || (unit->tcp->th_dport ^ seg->unit.tcp->th_dport)) {
        chain->stat.no_match++;
        return RSC_NO_MATCH;
    }

    return virtio_net_rsc_coalesce_data(chain, seg, buf, unit);
}

/* Packets with 'SYN' should bypass; packets with other control flags should
 * be sent only after draining, to prevent out-of-order delivery */
static int virtio_net_rsc_tcp_ctrl_check(VirtioNetRscChain *chain,
                                         struct tcp_header *tcp)
{
    uint16_t tcp_hdr;
    uint16_t tcp_flag;

    tcp_flag = htons(tcp->th_offset_flags);
    tcp_hdr = (tcp_flag & VIRTIO_NET_TCP_HDR_LENGTH) >> 10;
    tcp_flag &= VIRTIO_NET_TCP_FLAG;
    if (tcp_flag & TH_SYN) {
        chain->stat.tcp_syn++;
        return RSC_BYPASS;
    }

    if (tcp_flag & (TH_FIN | TH_URG | TH_RST | TH_ECE | TH_CWR)) {
        chain->stat.tcp_ctrl_drain++;
        return RSC_FINAL;
    }

    if (tcp_hdr > sizeof(struct tcp_header)) {
        chain->stat.tcp_all_opt++;
        return RSC_FINAL;
    }

    return RSC_CANDIDATE;
}

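/* RSC verdicts used above and below: RSC_BYPASS hands the packet straight
 * to the guest, RSC_FINAL drains the matching flow before delivery,
 * RSC_CANDIDATE admits the packet to coalescing, and RSC_NO_MATCH (from the
 * coalesce helpers) means "keep scanning the cached segments". */
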
static size_t virtio_net_rsc_do_coalesce(VirtioNetRscChain *chain,
                                         NetClientState *nc,
                                         const uint8_t *buf, size_t size,
                                         VirtioNetRscUnit *unit)
{
    int ret;
    VirtioNetRscSeg *seg, *nseg;

    if (QTAILQ_EMPTY(&chain->buffers)) {
        chain->stat.empty_cache++;
        virtio_net_rsc_cache_buf(chain, nc, buf, size);
        timer_mod(chain->drain_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_HOST) + chain->n->rsc_timeout);
        return size;
    }

    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
        if (chain->proto == ETH_P_IP) {
            ret = virtio_net_rsc_coalesce4(chain, seg, buf, size, unit);
        } else {
            ret = virtio_net_rsc_coalesce6(chain, seg, buf, size, unit);
        }

        if (ret == RSC_FINAL) {
            if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
                /* Send failed */
                chain->stat.final_failed++;
                return 0;
            }

            /* Send current packet */
            return virtio_net_do_receive(nc, buf, size);
        } else if (ret == RSC_NO_MATCH) {
            continue;
        } else {
            /* Coalesced; mark the flag so the checksum is recalculated
             * for ipv4 */
            seg->is_coalesced = 1;
            return size;
        }
    }

    chain->stat.no_match_cache++;
    virtio_net_rsc_cache_buf(chain, nc, buf, size);
    return size;
}

/* Drain a connection's data; this is to avoid out-of-order segments */
static size_t virtio_net_rsc_drain_flow(VirtioNetRscChain *chain,
                                        NetClientState *nc,
                                        const uint8_t *buf, size_t size,
                                        uint16_t ip_start, uint16_t ip_size,
                                        uint16_t tcp_port)
{
    VirtioNetRscSeg *seg, *nseg;
    uint32_t ppair1, ppair2;

    ppair1 = *(uint32_t *)(buf + tcp_port);
    QTAILQ_FOREACH_SAFE(seg, &chain->buffers, next, nseg) {
        ppair2 = *(uint32_t *)(seg->buf + tcp_port);
        if (memcmp(buf + ip_start, seg->buf + ip_start, ip_size)
            || (ppair1 != ppair2)) {
            continue;
        }
        if (virtio_net_rsc_drain_seg(chain, seg) == 0) {
            chain->stat.drain_failed++;
        }

        break;
    }

    return virtio_net_do_receive(nc, buf, size);
}

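/* ppair1/ppair2 compare both TCP ports at once: th_sport and th_dport are
 * adjacent 16-bit fields, so a single 32-bit load at the source-port offset
 * covers source and destination ports together. */
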
static int32_t virtio_net_rsc_sanity_check4(VirtioNetRscChain *chain,
                                            struct ip_header *ip,
                                            const uint8_t *buf, size_t size)
{
    uint16_t ip_len;

    /* Not an ipv4 packet */
    if (((ip->ip_ver_len & 0xF0) >> 4) != IP_HEADER_VERSION_4) {
        chain->stat.ip_option++;
        return RSC_BYPASS;
    }

    /* Don't handle packets with ip option */
    if ((ip->ip_ver_len & 0xF) != VIRTIO_NET_IP4_HEADER_LENGTH) {
        chain->stat.ip_option++;
        return RSC_BYPASS;
    }

    if (ip->ip_p != IPPROTO_TCP) {
        chain->stat.bypass_not_tcp++;
        return RSC_BYPASS;
    }

    /* Don't handle packets with ip fragment */
    if (!(htons(ip->ip_off) & IP_DF)) {
        chain->stat.ip_frag++;
        return RSC_BYPASS;
    }

    /* Don't handle packets with ecn flag */
    if (IPTOS_ECN(ip->ip_tos)) {
        chain->stat.ip_ecn++;
        return RSC_BYPASS;
    }

    ip_len = htons(ip->ip_len);
    if (ip_len < (sizeof(struct ip_header) + sizeof(struct tcp_header))
        || ip_len > (size - chain->n->guest_hdr_len -
                     sizeof(struct eth_header))) {
        chain->stat.ip_hacked++;
        return RSC_BYPASS;
    }

    return RSC_CANDIDATE;
}

static size_t virtio_net_rsc_receive4(VirtioNetRscChain *chain,
                                      NetClientState *nc,
                                      const uint8_t *buf, size_t size)
{
    int32_t ret;
    uint16_t hdr_len;
    VirtioNetRscUnit unit;

    hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;

    if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header)
        + sizeof(struct tcp_header))) {
        chain->stat.bypass_not_tcp++;
        return virtio_net_do_receive(nc, buf, size);
    }

    virtio_net_rsc_extract_unit4(chain, buf, &unit);
    if (virtio_net_rsc_sanity_check4(chain, unit.ip, buf, size)
        != RSC_CANDIDATE) {
        return virtio_net_do_receive(nc, buf, size);
    }

    ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
    if (ret == RSC_BYPASS) {
        return virtio_net_do_receive(nc, buf, size);
    } else if (ret == RSC_FINAL) {
        return virtio_net_rsc_drain_flow(chain, nc, buf, size,
                ((hdr_len + sizeof(struct eth_header)) + 12),
                VIRTIO_NET_IP4_ADDR_SIZE,
                hdr_len + sizeof(struct eth_header) + sizeof(struct ip_header));
    }

    return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
}

static int32_t virtio_net_rsc_sanity_check6(VirtioNetRscChain *chain,
                                            struct ip6_header *ip6,
                                            const uint8_t *buf, size_t size)
{
    uint16_t ip_len;

    if (((ip6->ip6_ctlun.ip6_un1.ip6_un1_flow & 0xF0) >> 4)
        != IP_HEADER_VERSION_6) {
        return RSC_BYPASS;
    }

    /* Both options and protocol are checked by this */
    if (ip6->ip6_ctlun.ip6_un1.ip6_un1_nxt != IPPROTO_TCP) {
        chain->stat.bypass_not_tcp++;
        return RSC_BYPASS;
    }

    ip_len = htons(ip6->ip6_ctlun.ip6_un1.ip6_un1_plen);
    if (ip_len < sizeof(struct tcp_header) ||
        ip_len > (size - chain->n->guest_hdr_len - sizeof(struct eth_header)
                  - sizeof(struct ip6_header))) {
        chain->stat.ip_hacked++;
        return RSC_BYPASS;
    }

    /* Don't handle packets with ecn flag */
    if (IP6_ECN(ip6->ip6_ctlun.ip6_un3.ip6_un3_ecn)) {
        chain->stat.ip_ecn++;
        return RSC_BYPASS;
    }

    return RSC_CANDIDATE;
}

static size_t virtio_net_rsc_receive6(void *opq, NetClientState *nc,
                                      const uint8_t *buf, size_t size)
{
    int32_t ret;
    uint16_t hdr_len;
    VirtioNetRscChain *chain;
    VirtioNetRscUnit unit;

    chain = (VirtioNetRscChain *)opq;
    hdr_len = ((VirtIONet *)(chain->n))->guest_hdr_len;

    if (size < (hdr_len + sizeof(struct eth_header) + sizeof(struct ip6_header)
        + sizeof(struct tcp_header))) {
        return virtio_net_do_receive(nc, buf, size);
    }

    virtio_net_rsc_extract_unit6(chain, buf, &unit);
    if (RSC_CANDIDATE != virtio_net_rsc_sanity_check6(chain,
                                                      unit.ip, buf, size)) {
        return virtio_net_do_receive(nc, buf, size);
    }

    ret = virtio_net_rsc_tcp_ctrl_check(chain, unit.tcp);
    if (ret == RSC_BYPASS) {
        return virtio_net_do_receive(nc, buf, size);
    } else if (ret == RSC_FINAL) {
        return virtio_net_rsc_drain_flow(chain, nc, buf, size,
                ((hdr_len + sizeof(struct eth_header)) + 8),
                VIRTIO_NET_IP6_ADDR_SIZE,
                hdr_len + sizeof(struct eth_header)
                + sizeof(struct ip6_header));
    }

    return virtio_net_rsc_do_coalesce(chain, nc, buf, size, &unit);
}

static VirtioNetRscChain *virtio_net_rsc_lookup_chain(VirtIONet *n,
                                                      NetClientState *nc,
                                                      uint16_t proto)
{
    VirtioNetRscChain *chain;

    if ((proto != (uint16_t)ETH_P_IP) && (proto != (uint16_t)ETH_P_IPV6)) {
        return NULL;
    }

    QTAILQ_FOREACH(chain, &n->rsc_chains, next) {
        if (chain->proto == proto) {
            return chain;
        }
    }

    chain = g_malloc(sizeof(*chain));
    chain->n = n;
    chain->proto = proto;
    if (proto == (uint16_t)ETH_P_IP) {
        chain->max_payload = VIRTIO_NET_MAX_IP4_PAYLOAD;
        chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
    } else {
        chain->max_payload = VIRTIO_NET_MAX_IP6_PAYLOAD;
        chain->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
    }
    chain->drain_timer = timer_new_ns(QEMU_CLOCK_HOST,
                                      virtio_net_rsc_purge, chain);
    memset(&chain->stat, 0, sizeof(chain->stat));

    QTAILQ_INIT(&chain->buffers);
    QTAILQ_INSERT_TAIL(&n->rsc_chains, chain, next);

    return chain;
}

static ssize_t virtio_net_rsc_receive(NetClientState *nc,
                                      const uint8_t *buf,
                                      size_t size)
{
    uint16_t proto;
    VirtioNetRscChain *chain;
    struct eth_header *eth;
    VirtIONet *n;

    n = qemu_get_nic_opaque(nc);
    if (size < (n->host_hdr_len + sizeof(struct eth_header))) {
        return virtio_net_do_receive(nc, buf, size);
    }

    eth = (struct eth_header *)(buf + n->guest_hdr_len);
    proto = htons(eth->h_proto);

    chain = virtio_net_rsc_lookup_chain(n, nc, proto);
    if (chain) {
        chain->stat.received++;
        if (proto == (uint16_t)ETH_P_IP && n->rsc4_enabled) {
            return virtio_net_rsc_receive4(chain, nc, buf, size);
        } else if (proto == (uint16_t)ETH_P_IPV6 && n->rsc6_enabled) {
            return virtio_net_rsc_receive6(chain, nc, buf, size);
        }
    }
    return virtio_net_do_receive(nc, buf, size);
}

static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    if ((n->rsc4_enabled || n->rsc6_enabled)) {
        return virtio_net_rsc_receive(nc, buf, size);
    } else {
        return virtio_net_do_receive(nc, buf, size);
    }
}

static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    g_free(q->async_tx.elem);
    q->async_tx.elem = NULL;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

/* TX */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement *elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    if (q->async_tx.elem) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    for (;;) {
        ssize_t ret;
        unsigned int out_num;
        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
        struct virtio_net_hdr_mrg_rxbuf mhdr;

        elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        out_num = elem->out_num;
        out_sg = elem->out_sg;
        if (out_num < 1) {
            virtio_error(vdev, "virtio-net header not in first element");
            virtqueue_detach_element(q->tx_vq, elem, 0);
            g_free(elem);
            return -EINVAL;
        }

        if (n->has_vnet_hdr) {
            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
                n->guest_hdr_len) {
                virtio_error(vdev, "virtio-net header incorrect");
                virtqueue_detach_element(q->tx_vq, elem, 0);
                g_free(elem);
                return -EINVAL;
            }
            if (n->needs_vnet_hdr_swap) {
                virtio_net_hdr_swap(vdev, (void *) &mhdr);
                sg2[0].iov_base = &mhdr;
                sg2[0].iov_len = n->guest_hdr_len;
                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
                                   out_sg, out_num,
                                   n->guest_hdr_len, -1);
                if (out_num == VIRTQUEUE_MAX_SIZE) {
                    goto drop;
                }
                out_num += 1;
                out_sg = sg2;
            }
        }
        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            return -EBUSY;
        }

drop:
        virtqueue_push(q->tx_vq, elem, 0);
        virtio_notify(vdev, q->tx_vq);
        g_free(elem);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}

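/*
 * A sketch (informative, not normative) of the two header rewrites in
 * virtio_net_flush_tx(), for a guest header of guest_hdr_len bytes and a
 * host header of host_hdr_len <= guest_hdr_len bytes:
 *
 *   needs_vnet_hdr_swap:
 *       out_sg = { &mhdr (byte-swapped header copy) }
 *              + guest buffers starting at offset guest_hdr_len
 *
 *   host_hdr_len != guest_hdr_len:
 *       out_sg = first host_hdr_len bytes of the guest header
 *              + guest buffers starting at offset guest_hdr_len
 *       (the tail of the guest header, e.g. the mergeable-rx-buf
 *        num_buffers field, is not sent to the backend)
 */
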
static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        if (virtio_net_flush_tx(q) == -EINVAL) {
            return;
        }
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

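/*
 * The two kick handlers above implement the two TX mitigation modes
 * selected by the "tx" property: "timer" batches packets until tx_timeout
 * nanoseconds have elapsed, while "bh" (the effective default) flushes
 * from a bottom half on the next main-loop iteration.  A hedged
 * command-line example (the netdev id "nd0" is an assumption; the
 * property names are the ones defined at the bottom of this file):
 *
 *   -device virtio-net-pci,netdev=nd0,tx=timer,x-txtimer=150000
 *   -device virtio-net-pci,netdev=nd0,tx=bh,x-txburst=128
 */
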
static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    /* This happens when device was stopped but the timer wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY || ret == -EINVAL) {
        return; /* Notification re-enable handled by tx_complete or device
                 * broken */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking. If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    ret = virtio_net_flush_tx(q);
    if (ret == -EINVAL) {
        return;
    } else if (ret > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}

static void virtio_net_add_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
                                           virtio_net_handle_rx);

    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[index].tx_vq =
            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
                             virtio_net_handle_tx_timer);
        n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[index]);
    } else {
        n->vqs[index].tx_vq =
            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
                             virtio_net_handle_tx_bh);
        n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
    }

    n->vqs[index].tx_waiting = 0;
    n->vqs[index].n = n;
}

static void virtio_net_del_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = &n->vqs[index];
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    qemu_purge_queued_packets(nc);

    virtio_del_queue(vdev, index * 2);
    if (q->tx_timer) {
        timer_del(q->tx_timer);
        timer_free(q->tx_timer);
        q->tx_timer = NULL;
    } else {
        qemu_bh_delete(q->tx_bh);
        q->tx_bh = NULL;
    }
    q->tx_waiting = 0;
    virtio_del_queue(vdev, index * 2 + 1);
}

static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int old_num_queues = virtio_get_num_queues(vdev);
    int new_num_queues = new_max_queues * 2 + 1;
    int i;

    assert(old_num_queues >= 3);
    assert(old_num_queues % 2 == 1);

    if (old_num_queues == new_num_queues) {
        return;
    }

    /*
     * We always need to remove and add ctrl vq if
     * old_num_queues != new_num_queues. Remove ctrl_vq first,
     * and then we only enter one of the following two loops.
     */
    virtio_del_queue(vdev, old_num_queues - 1);

    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
        /* new_num_queues < old_num_queues */
        virtio_net_del_queue(n, i / 2);
    }

    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
        /* new_num_queues > old_num_queues */
        virtio_net_add_queue(n, i / 2);
    }

    /* add ctrl_vq last */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
}

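/*
 * Resulting virtqueue layout, by way of example (a sketch derived from
 * the code above and vq2q()): with max_queues == 2 the device exposes
 *
 *   vq 0: rx0   vq 1: tx0   vq 2: rx1   vq 3: tx1   vq 4: ctrl
 *
 * i.e. queue pair q owns vqs 2q and 2q + 1, and the control vq is always
 * the last one, which is why it has to be deleted and re-added whenever
 * the number of queue pairs changes.
 */
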
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    int max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;
    virtio_net_change_num_queues(n, max);

    virtio_net_set_queues(n);
}

static int virtio_net_post_load_device(void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, link_down;

    trace_virtio_net_post_load_device();
    virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
                               virtio_vdev_has_feature(vdev,
                                                       VIRTIO_F_VERSION_1));

    /* MAC_TABLE_ENTRIES may be different from the saved image */
    if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
        n->mac_table.in_use = 0;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
                                  QEMU_CLOCK_VIRTUAL,
                                  virtio_net_announce_timer, n);
        if (n->announce_timer.round) {
            timer_mod(n->announce_timer.tm,
                      qemu_clock_get_ms(n->announce_timer.type));
        } else {
            qemu_announce_timer_del(&n->announce_timer, false);
        }
    }

    return 0;
}

/* tx_waiting field of a VirtIONetQueue */
static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
    .name = "virtio-net-queue-tx_waiting",
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
        VMSTATE_END_OF_LIST()
    },
};

static bool max_queues_gt_1(void *opaque, int version_id)
{
    return VIRTIO_NET(opaque)->max_queues > 1;
}

static bool has_ctrl_guest_offloads(void *opaque, int version_id)
{
    return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
                                   VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
}

static bool mac_table_fits(void *opaque, int version_id)
{
    return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
}

static bool mac_table_doesnt_fit(void *opaque, int version_id)
{
    return !mac_table_fits(opaque, version_id);
}

/* This temporary type is shared by all the WITH_TMP methods
 * although only some fields are used by each.
 */
struct VirtIONetMigTmp {
    VirtIONet      *parent;
    VirtIONetQueue *vqs_1;
    uint16_t        curr_queues_1;
    uint8_t         has_ufo;
    uint32_t        has_vnet_hdr;
};

/* The 2nd and subsequent tx_waiting flags are loaded later than
 * the 1st entry in the queues and only if there's more than one
 * entry.  We use the tmp mechanism to calculate a temporary
 * pointer and count and also validate the count.
 */

static int virtio_net_tx_waiting_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->vqs_1 = tmp->parent->vqs + 1;
    tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
    if (tmp->parent->curr_queues == 0) {
        tmp->curr_queues_1 = 0;
    }

    return 0;
}

static int virtio_net_tx_waiting_pre_load(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    /* Reuse the pointer setup from save */
    virtio_net_tx_waiting_pre_save(opaque);

    if (tmp->parent->curr_queues > tmp->parent->max_queues) {
        error_report("virtio-net: curr_queues %x > max_queues %x",
                     tmp->parent->curr_queues, tmp->parent->max_queues);

        return -EINVAL;
    }

    return 0; /* all good */
}

static const VMStateDescription vmstate_virtio_net_tx_waiting = {
    .name      = "virtio-net-tx_waiting",
    .pre_load  = virtio_net_tx_waiting_pre_load,
    .pre_save  = virtio_net_tx_waiting_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
                                             curr_queues_1,
                                             vmstate_virtio_net_queue_tx_waiting,
                                             struct VirtIONetQueue),
        VMSTATE_END_OF_LIST()
    },
};

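/*
 * Sketch of the resulting stream layout (informative, not normative):
 * the device description below saves vqs[0].tx_waiting inline via
 * VMSTATE_STRUCT_POINTER, while this WITH_TMP section appends
 * curr_queues - 1 further tx_waiting words starting at vqs[1], so a
 * single-queue stream keeps the historical layout unchanged.
 */
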
/* the 'has_ufo' flag is just tested; if the incoming stream has the
 * flag set we need to check that we have it
 */
static int virtio_net_ufo_post_load(void *opaque, int version_id)
{
    struct VirtIONetMigTmp *tmp = opaque;

    if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
        error_report("virtio-net: saved image requires TUN_F_UFO support");
        return -EINVAL;
    }

    return 0;
}

static int virtio_net_ufo_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->has_ufo = tmp->parent->has_ufo;

    return 0;
}

static const VMStateDescription vmstate_virtio_net_has_ufo = {
    .name      = "virtio-net-ufo",
    .post_load = virtio_net_ufo_post_load,
    .pre_save  = virtio_net_ufo_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
        VMSTATE_END_OF_LIST()
    },
};

/* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
 * flag set we need to check that we have it
 */
static int virtio_net_vnet_post_load(void *opaque, int version_id)
{
    struct VirtIONetMigTmp *tmp = opaque;

    if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
        error_report("virtio-net: saved image requires vnet_hdr=on");
        return -EINVAL;
    }

    return 0;
}

static int virtio_net_vnet_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;

    return 0;
}

static const VMStateDescription vmstate_virtio_net_has_vnet = {
    .name      = "virtio-net-vnet",
    .post_load = virtio_net_vnet_post_load,
    .pre_save  = virtio_net_vnet_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_net_device = {
    .name = "virtio-net-device",
    .version_id = VIRTIO_NET_VM_VERSION,
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .post_load = virtio_net_post_load_device,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
        VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
                               vmstate_virtio_net_queue_tx_waiting,
                               VirtIONetQueue),
        VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
        VMSTATE_UINT16(status, VirtIONet),
        VMSTATE_UINT8(promisc, VirtIONet),
        VMSTATE_UINT8(allmulti, VirtIONet),
        VMSTATE_UINT32(mac_table.in_use, VirtIONet),

        /* Guarded pair: If it fits we load it, else we throw it away
         * - can happen if source has a larger MAC table; post-load
         * sets flags in this case.
         */
        VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
                                 0, mac_table_fits, mac_table.in_use,
                                 ETH_ALEN),
        VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
                                     mac_table.in_use, ETH_ALEN),

        /* Note: This is an array of uint32's that's always been saved as a
         * buffer; hold onto your endiannesses; it's actually used as a bitmap
         * but based on the uint.
         */
        VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_has_vnet),
        VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
        VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
        VMSTATE_UINT8(alluni, VirtIONet),
        VMSTATE_UINT8(nomulti, VirtIONet),
        VMSTATE_UINT8(nouni, VirtIONet),
        VMSTATE_UINT8(nobcast, VirtIONet),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_has_ufo),
        VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
                            vmstate_info_uint16_equal, uint16_t),
        VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_tx_waiting),
        VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
                            has_ctrl_guest_offloads),
        VMSTATE_END_OF_LIST()
    },
};

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
    .announce = virtio_net_announce,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
{
    virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);

    n->config_size = virtio_feature_get_config_size(feature_sizes,
                                                    host_features);
}

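/*
 * Example (a sketch, not normative): virtio_feature_get_config_size()
 * returns the largest .end among the feature_sizes entries whose bit is
 * set.  With only the always-set VIRTIO_NET_F_MAC, the config space ends
 * after the mac field; if e.g. VIRTIO_NET_F_MTU is also offered, it
 * extends through the mtu field.
 */
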
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, the netclient name will be type.x.
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}

static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    if (n->net_conf.mtu) {
        n->host_features |= (1ULL << VIRTIO_NET_F_MTU);
    }

    if (n->net_conf.duplex_str) {
        if (strncmp(n->net_conf.duplex_str, "half", 5) == 0) {
            n->net_conf.duplex = DUPLEX_HALF;
        } else if (strncmp(n->net_conf.duplex_str, "full", 5) == 0) {
            n->net_conf.duplex = DUPLEX_FULL;
        } else {
            error_setg(errp, "'duplex' must be 'half' or 'full'");
        }
        n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
    } else {
        n->net_conf.duplex = DUPLEX_UNKNOWN;
    }

    if (n->net_conf.speed < SPEED_UNKNOWN) {
        error_setg(errp, "'speed' must be between 0 and INT_MAX");
    } else if (n->net_conf.speed >= 0) {
        n->host_features |= (1ULL << VIRTIO_NET_F_SPEED_DUPLEX);
    }

    virtio_net_set_config_size(n, n->host_features);
    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    /*
     * We set a lower limit on RX queue size to what it always was.
     * Guests that want a smaller ring can always resize it without
     * help from us (using virtio 1 and up).
     */
    if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
        n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
        !is_power_of_2(n->net_conf.rx_queue_size)) {
        error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d.",
                   n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
        !is_power_of_2(n->net_conf.tx_queue_size)) {
        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d",
                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
        virtio_cleanup(vdev);
        return;
    }
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->curr_queues = 1;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        warn_report("virtio-net: "
                    "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                    n->net_conf.tx);
        error_printf("Defaulting to \"bh\"");
    }

    n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
                                    n->net_conf.tx_queue_size);

    for (i = 0; i < n->max_queues; i++) {
        virtio_net_add_queue(n, i);
    }

    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    qemu_announce_timer_reset(&n->announce_timer, migrate_announce_params(),
                              QEMU_CLOCK_VIRTUAL,
                              virtio_net_announce_timer, n);
    n->announce_timer.round = 0;

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    QTAILQ_INIT(&n->rsc_chains);
    n->qdev = dev;
}

static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i, max_queues;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    max_queues = n->multiqueue ? n->max_queues : 1;
    for (i = 0; i < max_queues; i++) {
        virtio_net_del_queue(n, i);
    }

    qemu_announce_timer_del(&n->announce_timer, false);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_net_rsc_cleanup(n);
    virtio_cleanup(vdev);
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
    device_add_bootindex_property(obj, &n->nic_conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}

static int virtio_net_pre_save(void *opaque)
{
    VirtIONet *n = opaque;

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);

    return 0;
}

static const VMStateDescription vmstate_virtio_net = {
    .name = "virtio-net",
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .version_id = VIRTIO_NET_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
    .pre_save = virtio_net_pre_save,
};

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT64("csum", VirtIONet, host_features,
                      VIRTIO_NET_F_CSUM, true),
    DEFINE_PROP_BIT64("guest_csum", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_CSUM, true),
    DEFINE_PROP_BIT64("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
    DEFINE_PROP_BIT64("guest_tso4", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_TSO4, true),
    DEFINE_PROP_BIT64("guest_tso6", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_TSO6, true),
    DEFINE_PROP_BIT64("guest_ecn", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_ECN, true),
    DEFINE_PROP_BIT64("guest_ufo", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_UFO, true),
    DEFINE_PROP_BIT64("guest_announce", VirtIONet, host_features,
                      VIRTIO_NET_F_GUEST_ANNOUNCE, true),
    DEFINE_PROP_BIT64("host_tso4", VirtIONet, host_features,
                      VIRTIO_NET_F_HOST_TSO4, true),
    DEFINE_PROP_BIT64("host_tso6", VirtIONet, host_features,
                      VIRTIO_NET_F_HOST_TSO6, true),
    DEFINE_PROP_BIT64("host_ecn", VirtIONet, host_features,
                      VIRTIO_NET_F_HOST_ECN, true),
    DEFINE_PROP_BIT64("host_ufo", VirtIONet, host_features,
                      VIRTIO_NET_F_HOST_UFO, true),
    DEFINE_PROP_BIT64("mrg_rxbuf", VirtIONet, host_features,
                      VIRTIO_NET_F_MRG_RXBUF, true),
    DEFINE_PROP_BIT64("status", VirtIONet, host_features,
                      VIRTIO_NET_F_STATUS, true),
    DEFINE_PROP_BIT64("ctrl_vq", VirtIONet, host_features,
                      VIRTIO_NET_F_CTRL_VQ, true),
    DEFINE_PROP_BIT64("ctrl_rx", VirtIONet, host_features,
                      VIRTIO_NET_F_CTRL_RX, true),
    DEFINE_PROP_BIT64("ctrl_vlan", VirtIONet, host_features,
                      VIRTIO_NET_F_CTRL_VLAN, true),
    DEFINE_PROP_BIT64("ctrl_rx_extra", VirtIONet, host_features,
                      VIRTIO_NET_F_CTRL_RX_EXTRA, true),
    DEFINE_PROP_BIT64("ctrl_mac_addr", VirtIONet, host_features,
                      VIRTIO_NET_F_CTRL_MAC_ADDR, true),
    DEFINE_PROP_BIT64("ctrl_guest_offloads", VirtIONet, host_features,
                      VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
    DEFINE_PROP_BIT64("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
    DEFINE_PROP_BIT64("guest_rsc_ext", VirtIONet, host_features,
                      VIRTIO_NET_F_RSC_EXT, false),
    DEFINE_PROP_UINT32("rsc_interval", VirtIONet, rsc_timeout,
                       VIRTIO_NET_RSC_DEFAULT_INTERVAL),
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                       VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
                       VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
    DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
                     true),
    DEFINE_PROP_INT32("speed", VirtIONet, net_conf.speed, SPEED_UNKNOWN),
    DEFINE_PROP_STRING("duplex", VirtIONet, net_conf.duplex_str),
    DEFINE_PROP_END_OF_LIST(),
};

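/*
 * Hedged usage example for the properties above (the netdev id "nd0" is
 * an assumption; the property names are the ones defined in this table,
 * and rx_queue_size must be a power of 2 within the realize-time limits):
 *
 *   -netdev tap,id=nd0 \
 *   -device virtio-net-pci,netdev=nd0,mq=on,mrg_rxbuf=on,\
 *           rx_queue_size=512,host_mtu=9000
 */
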
static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    dc->vmsd = &vmstate_virtio_net;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
    vdc->vmsd = &vmstate_virtio_net_device;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)