virtio-net: Prefer is_power_of_2()
[qemu/kevin.git] / hw/net/virtio-net.c
blob 657d099c5440b0835f8355932394f1caeffd6672
1 /*
2 * Virtio Network Device
4 * Copyright IBM, Corp. 2007
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
14 #include "qemu/osdep.h"
15 #include "qemu/iov.h"
16 #include "hw/virtio/virtio.h"
17 #include "net/net.h"
18 #include "net/checksum.h"
19 #include "net/tap.h"
20 #include "qemu/error-report.h"
21 #include "qemu/timer.h"
22 #include "hw/virtio/virtio-net.h"
23 #include "net/vhost_net.h"
24 #include "hw/virtio/virtio-bus.h"
25 #include "qapi/qmp/qjson.h"
26 #include "qapi-event.h"
27 #include "hw/virtio/virtio-access.h"
28 #include "migration/misc.h"
30 #define VIRTIO_NET_VM_VERSION 11
32 #define MAC_TABLE_ENTRIES 64
33 #define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
35 /* previously fixed value */
36 #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
37 #define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
39 /* for now, only allow larger queues; with virtio-1, guest can downsize */
40 #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
41 #define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
44 * Calculate the number of bytes up to and including the given 'field' of
45 * 'container'.
47 #define endof(container, field) \
48 (offsetof(container, field) + sizeof(((container *)0)->field))
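/* For example, endof(struct virtio_net_config, status) is the byte offset of
 * the 'status' field plus its size, i.e. how much of the config space must be
 * exposed once VIRTIO_NET_F_STATUS is offered (see feature_sizes below). */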
50 typedef struct VirtIOFeature {
51 uint32_t flags;
52 size_t end;
53 } VirtIOFeature;
55 static VirtIOFeature feature_sizes[] = {
56 {.flags = 1 << VIRTIO_NET_F_MAC,
57 .end = endof(struct virtio_net_config, mac)},
58 {.flags = 1 << VIRTIO_NET_F_STATUS,
59 .end = endof(struct virtio_net_config, status)},
60 {.flags = 1 << VIRTIO_NET_F_MQ,
61 .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
62 {.flags = 1 << VIRTIO_NET_F_MTU,
63 .end = endof(struct virtio_net_config, mtu)},
67 static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
69 VirtIONet *n = qemu_get_nic_opaque(nc);
71 return &n->vqs[nc->queue_index];
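/* Virtqueues come in rx/tx pairs: queue pair q uses virtqueue indexes 2*q (rx)
 * and 2*q + 1 (tx), so dividing the virtqueue index by two recovers the pair. */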
74 static int vq2q(int queue_index)
76 return queue_index / 2;
79 /* TODO
80 * - we could suppress RX interrupt if we were so inclined.
83 static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
85 VirtIONet *n = VIRTIO_NET(vdev);
86 struct virtio_net_config netcfg;
88 virtio_stw_p(vdev, &netcfg.status, n->status);
89 virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
90 virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
91 memcpy(netcfg.mac, n->mac, ETH_ALEN);
92 memcpy(config, &netcfg, n->config_size);
95 static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
97 VirtIONet *n = VIRTIO_NET(vdev);
98 struct virtio_net_config netcfg = {};
100 memcpy(&netcfg, config, n->config_size);
102 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
103 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
104 memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
105 memcpy(n->mac, netcfg.mac, ETH_ALEN);
106 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
110 static bool virtio_net_started(VirtIONet *n, uint8_t status)
112 VirtIODevice *vdev = VIRTIO_DEVICE(n);
113 return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
114 (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
117 static void virtio_net_announce_timer(void *opaque)
119 VirtIONet *n = opaque;
120 VirtIODevice *vdev = VIRTIO_DEVICE(n);
122 n->announce_counter--;
123 n->status |= VIRTIO_NET_S_ANNOUNCE;
124 virtio_notify_config(vdev);
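/* Start or stop the vhost backend so that it matches the guest-visible device
 * status and link state; if vhost cannot be used (header endianness swap
 * needed, MTU rejected, or start failure) we fall back to the userspace
 * datapath. */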
127 static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
129 VirtIODevice *vdev = VIRTIO_DEVICE(n);
130 NetClientState *nc = qemu_get_queue(n->nic);
131 int queues = n->multiqueue ? n->max_queues : 1;
133 if (!get_vhost_net(nc->peer)) {
134 return;
137 if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
138 !!n->vhost_started) {
139 return;
141 if (!n->vhost_started) {
142 int r, i;
144 if (n->needs_vnet_hdr_swap) {
145 error_report("backend does not support %s vnet headers; "
146 "falling back on userspace virtio",
147 virtio_is_big_endian(vdev) ? "BE" : "LE");
148 return;
151 /* Any packets outstanding? Purge them to avoid touching rings
152 * when vhost is running.
154 for (i = 0; i < queues; i++) {
155 NetClientState *qnc = qemu_get_subqueue(n->nic, i);
157 /* Purge both directions: TX and RX. */
158 qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
159 qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
162 if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
163 r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
164 if (r < 0) {
165 error_report("%uBytes MTU not supported by the backend",
166 n->net_conf.mtu);
168 return;
172 n->vhost_started = 1;
173 r = vhost_net_start(vdev, n->nic->ncs, queues);
174 if (r < 0) {
175 error_report("unable to start vhost net: %d: "
176 "falling back on userspace virtio", -r);
177 n->vhost_started = 0;
179 } else {
180 vhost_net_stop(vdev, n->nic->ncs, queues);
181 n->vhost_started = 0;
185 static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
186 NetClientState *peer,
187 bool enable)
189 if (virtio_is_big_endian(vdev)) {
190 return qemu_set_vnet_be(peer, enable);
191 } else {
192 return qemu_set_vnet_le(peer, enable);
196 static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
197 int queues, bool enable)
199 int i;
201 for (i = 0; i < queues; i++) {
202 if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
203 enable) {
204 while (--i >= 0) {
205 virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
208 return true;
212 return false;
215 static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
217 VirtIODevice *vdev = VIRTIO_DEVICE(n);
218 int queues = n->multiqueue ? n->max_queues : 1;
220 if (virtio_net_started(n, status)) {
221 /* Before using the device, we tell the network backend about the
222 * endianness to use when parsing vnet headers. If the backend
223 * can't do it, we fall back on fixing the headers in the core
224 * virtio-net code.
226 n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
227 queues, true);
228 } else if (virtio_net_started(n, vdev->status)) {
229 /* After using the device, we need to reset the network backend to
230 * the default (guest native endianness), otherwise the guest may
231 * lose network connectivity if it is rebooted into a different
232 * endianness.
234 virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
238 static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
240 unsigned int dropped = virtqueue_drop_all(vq);
241 if (dropped) {
242 virtio_notify(vdev, vq);
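/* Propagate a device status change: update vnet header endianness and vhost
 * state, then arm or cancel each queue's TX timer or bottom half as needed. */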
246 static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
248 VirtIONet *n = VIRTIO_NET(vdev);
249 VirtIONetQueue *q;
250 int i;
251 uint8_t queue_status;
253 virtio_net_vnet_endian_status(n, status);
254 virtio_net_vhost_status(n, status);
256 for (i = 0; i < n->max_queues; i++) {
257 NetClientState *ncs = qemu_get_subqueue(n->nic, i);
258 bool queue_started;
259 q = &n->vqs[i];
261 if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
262 queue_status = 0;
263 } else {
264 queue_status = status;
266 queue_started =
267 virtio_net_started(n, queue_status) && !n->vhost_started;
269 if (queue_started) {
270 qemu_flush_queued_packets(ncs);
273 if (!q->tx_waiting) {
274 continue;
277 if (queue_started) {
278 if (q->tx_timer) {
279 timer_mod(q->tx_timer,
280 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
281 } else {
282 qemu_bh_schedule(q->tx_bh);
284 } else {
285 if (q->tx_timer) {
286 timer_del(q->tx_timer);
287 } else {
288 qemu_bh_cancel(q->tx_bh);
290 if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
291 (queue_status & VIRTIO_CONFIG_S_DRIVER_OK)) {
292 /* if tx is waiting, we likely have some packets in the tx queue
293 * and have disabled notification */
294 q->tx_waiting = 0;
295 virtio_queue_set_notification(q->tx_vq, 1);
296 virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
302 static void virtio_net_set_link_status(NetClientState *nc)
304 VirtIONet *n = qemu_get_nic_opaque(nc);
305 VirtIODevice *vdev = VIRTIO_DEVICE(n);
306 uint16_t old_status = n->status;
308 if (nc->link_down)
309 n->status &= ~VIRTIO_NET_S_LINK_UP;
310 else
311 n->status |= VIRTIO_NET_S_LINK_UP;
313 if (n->status != old_status)
314 virtio_notify_config(vdev);
316 virtio_net_set_status(vdev, vdev->status);
319 static void rxfilter_notify(NetClientState *nc)
321 VirtIONet *n = qemu_get_nic_opaque(nc);
323 if (nc->rxfilter_notify_enabled) {
324 gchar *path = object_get_canonical_path(OBJECT(n->qdev));
325 qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
326 n->netclient_name, path, &error_abort);
327 g_free(path);
329 /* disable event notification to avoid event flooding */
330 nc->rxfilter_notify_enabled = 0;
334 static intList *get_vlan_table(VirtIONet *n)
336 intList *list, *entry;
337 int i, j;
339 list = NULL;
340 for (i = 0; i < MAX_VLAN >> 5; i++) {
341 for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
342 if (n->vlans[i] & (1U << j)) {
343 entry = g_malloc0(sizeof(*entry));
344 entry->value = (i << 5) + j;
345 entry->next = list;
346 list = entry;
351 return list;
354 static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
356 VirtIONet *n = qemu_get_nic_opaque(nc);
357 VirtIODevice *vdev = VIRTIO_DEVICE(n);
358 RxFilterInfo *info;
359 strList *str_list, *entry;
360 int i;
362 info = g_malloc0(sizeof(*info));
363 info->name = g_strdup(nc->name);
364 info->promiscuous = n->promisc;
366 if (n->nouni) {
367 info->unicast = RX_STATE_NONE;
368 } else if (n->alluni) {
369 info->unicast = RX_STATE_ALL;
370 } else {
371 info->unicast = RX_STATE_NORMAL;
374 if (n->nomulti) {
375 info->multicast = RX_STATE_NONE;
376 } else if (n->allmulti) {
377 info->multicast = RX_STATE_ALL;
378 } else {
379 info->multicast = RX_STATE_NORMAL;
382 info->broadcast_allowed = n->nobcast;
383 info->multicast_overflow = n->mac_table.multi_overflow;
384 info->unicast_overflow = n->mac_table.uni_overflow;
386 info->main_mac = qemu_mac_strdup_printf(n->mac);
388 str_list = NULL;
389 for (i = 0; i < n->mac_table.first_multi; i++) {
390 entry = g_malloc0(sizeof(*entry));
391 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
392 entry->next = str_list;
393 str_list = entry;
395 info->unicast_table = str_list;
397 str_list = NULL;
398 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
399 entry = g_malloc0(sizeof(*entry));
400 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
401 entry->next = str_list;
402 str_list = entry;
404 info->multicast_table = str_list;
405 info->vlan_table = get_vlan_table(n);
407 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
408 info->vlan = RX_STATE_ALL;
409 } else if (!info->vlan_table) {
410 info->vlan = RX_STATE_NONE;
411 } else {
412 info->vlan = RX_STATE_NORMAL;
415 /* enable event notification after query */
416 nc->rxfilter_notify_enabled = 1;
418 return info;
421 static void virtio_net_reset(VirtIODevice *vdev)
423 VirtIONet *n = VIRTIO_NET(vdev);
425 /* Reset back to compatibility mode */
426 n->promisc = 1;
427 n->allmulti = 0;
428 n->alluni = 0;
429 n->nomulti = 0;
430 n->nouni = 0;
431 n->nobcast = 0;
432 /* multiqueue is disabled by default */
433 n->curr_queues = 1;
434 timer_del(n->announce_timer);
435 n->announce_counter = 0;
436 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
438 /* Flush any MAC and VLAN filter table state */
439 n->mac_table.in_use = 0;
440 n->mac_table.first_multi = 0;
441 n->mac_table.multi_overflow = 0;
442 n->mac_table.uni_overflow = 0;
443 memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
444 memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
445 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
446 memset(n->vlans, 0, MAX_VLAN >> 3);
449 static void peer_test_vnet_hdr(VirtIONet *n)
451 NetClientState *nc = qemu_get_queue(n->nic);
452 if (!nc->peer) {
453 return;
456 n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
459 static int peer_has_vnet_hdr(VirtIONet *n)
461 return n->has_vnet_hdr;
464 static int peer_has_ufo(VirtIONet *n)
466 if (!peer_has_vnet_hdr(n))
467 return 0;
469 n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
471 return n->has_ufo;
474 static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
475 int version_1)
477 int i;
478 NetClientState *nc;
480 n->mergeable_rx_bufs = mergeable_rx_bufs;
482 if (version_1) {
483 n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
484 } else {
485 n->guest_hdr_len = n->mergeable_rx_bufs ?
486 sizeof(struct virtio_net_hdr_mrg_rxbuf) :
487 sizeof(struct virtio_net_hdr);
490 for (i = 0; i < n->max_queues; i++) {
491 nc = qemu_get_subqueue(n->nic, i);
493 if (peer_has_vnet_hdr(n) &&
494 qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
495 qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
496 n->host_hdr_len = n->guest_hdr_len;
501 static int virtio_net_max_tx_queue_size(VirtIONet *n)
503 NetClientState *peer = n->nic_conf.peers.ncs[0];
506 * Backends other than vhost-user don't support max queue size.
508 if (!peer) {
509 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
512 if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
513 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
516 return VIRTQUEUE_MAX_SIZE;
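/* Enable the peer's queue when it joins the active set: vhost-user vrings are
 * enabled explicitly, tap queues via tap_enable(); other backends and
 * single-queue setups need nothing. */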
519 static int peer_attach(VirtIONet *n, int index)
521 NetClientState *nc = qemu_get_subqueue(n->nic, index);
523 if (!nc->peer) {
524 return 0;
527 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
528 vhost_set_vring_enable(nc->peer, 1);
531 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
532 return 0;
535 if (n->max_queues == 1) {
536 return 0;
539 return tap_enable(nc->peer);
542 static int peer_detach(VirtIONet *n, int index)
544 NetClientState *nc = qemu_get_subqueue(n->nic, index);
546 if (!nc->peer) {
547 return 0;
550 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
551 vhost_set_vring_enable(nc->peer, 0);
554 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
555 return 0;
558 return tap_disable(nc->peer);
561 static void virtio_net_set_queues(VirtIONet *n)
563 int i;
564 int r;
566 if (n->nic->peer_deleted) {
567 return;
570 for (i = 0; i < n->max_queues; i++) {
571 if (i < n->curr_queues) {
572 r = peer_attach(n, i);
573 assert(!r);
574 } else {
575 r = peer_detach(n, i);
576 assert(!r);
581 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
583 static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
584 Error **errp)
586 VirtIONet *n = VIRTIO_NET(vdev);
587 NetClientState *nc = qemu_get_queue(n->nic);
589 /* Firstly sync all virtio-net possible supported features */
590 features |= n->host_features;
592 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
594 if (!peer_has_vnet_hdr(n)) {
595 virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
596 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
597 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
598 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
600 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
601 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
602 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
603 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
606 if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
607 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
608 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
611 if (!get_vhost_net(nc->peer)) {
612 return features;
614 features = vhost_net_get_features(get_vhost_net(nc->peer), features);
615 vdev->backend_features = features;
617 if (n->mtu_bypass_backend &&
618 (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
619 features |= (1ULL << VIRTIO_NET_F_MTU);
622 return features;
625 static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
627 uint64_t features = 0;
629 /* Linux kernel 2.6.25. It understood MAC (as everyone must),
630 * but also these: */
631 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
632 virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
633 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
634 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
635 virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
637 return features;
640 static void virtio_net_apply_guest_offloads(VirtIONet *n)
642 qemu_set_offload(qemu_get_queue(n->nic)->peer,
643 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
644 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
645 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
646 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
647 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
650 static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
652 static const uint64_t guest_offloads_mask =
653 (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
654 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
655 (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
656 (1ULL << VIRTIO_NET_F_GUEST_ECN) |
657 (1ULL << VIRTIO_NET_F_GUEST_UFO);
659 return guest_offloads_mask & features;
662 static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
664 VirtIODevice *vdev = VIRTIO_DEVICE(n);
665 return virtio_net_guest_offloads_by_features(vdev->guest_features);
668 static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
670 VirtIONet *n = VIRTIO_NET(vdev);
671 int i;
673 if (n->mtu_bypass_backend &&
674 !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
675 features &= ~(1ULL << VIRTIO_NET_F_MTU);
678 virtio_net_set_multiqueue(n,
679 virtio_has_feature(features, VIRTIO_NET_F_MQ));
681 virtio_net_set_mrg_rx_bufs(n,
682 virtio_has_feature(features,
683 VIRTIO_NET_F_MRG_RXBUF),
684 virtio_has_feature(features,
685 VIRTIO_F_VERSION_1));
687 if (n->has_vnet_hdr) {
688 n->curr_guest_offloads =
689 virtio_net_guest_offloads_by_features(features);
690 virtio_net_apply_guest_offloads(n);
693 for (i = 0; i < n->max_queues; i++) {
694 NetClientState *nc = qemu_get_subqueue(n->nic, i);
696 if (!get_vhost_net(nc->peer)) {
697 continue;
699 vhost_net_ack_features(get_vhost_net(nc->peer), features);
702 if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
703 memset(n->vlans, 0, MAX_VLAN >> 3);
704 } else {
705 memset(n->vlans, 0xff, MAX_VLAN >> 3);
709 static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
710 struct iovec *iov, unsigned int iov_cnt)
712 uint8_t on;
713 size_t s;
714 NetClientState *nc = qemu_get_queue(n->nic);
716 s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
717 if (s != sizeof(on)) {
718 return VIRTIO_NET_ERR;
721 if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
722 n->promisc = on;
723 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
724 n->allmulti = on;
725 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
726 n->alluni = on;
727 } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
728 n->nomulti = on;
729 } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
730 n->nouni = on;
731 } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
732 n->nobcast = on;
733 } else {
734 return VIRTIO_NET_ERR;
737 rxfilter_notify(nc);
739 return VIRTIO_NET_OK;
742 static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
743 struct iovec *iov, unsigned int iov_cnt)
745 VirtIODevice *vdev = VIRTIO_DEVICE(n);
746 uint64_t offloads;
747 size_t s;
749 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
750 return VIRTIO_NET_ERR;
753 s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
754 if (s != sizeof(offloads)) {
755 return VIRTIO_NET_ERR;
758 if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
759 uint64_t supported_offloads;
761 if (!n->has_vnet_hdr) {
762 return VIRTIO_NET_ERR;
765 supported_offloads = virtio_net_supported_guest_offloads(n);
766 if (offloads & ~supported_offloads) {
767 return VIRTIO_NET_ERR;
770 n->curr_guest_offloads = offloads;
771 virtio_net_apply_guest_offloads(n);
773 return VIRTIO_NET_OK;
774 } else {
775 return VIRTIO_NET_ERR;
779 static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
780 struct iovec *iov, unsigned int iov_cnt)
782 VirtIODevice *vdev = VIRTIO_DEVICE(n);
783 struct virtio_net_ctrl_mac mac_data;
784 size_t s;
785 NetClientState *nc = qemu_get_queue(n->nic);
787 if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
788 if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
789 return VIRTIO_NET_ERR;
791 s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
792 assert(s == sizeof(n->mac));
793 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
794 rxfilter_notify(nc);
796 return VIRTIO_NET_OK;
799 if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
800 return VIRTIO_NET_ERR;
803 int in_use = 0;
804 int first_multi = 0;
805 uint8_t uni_overflow = 0;
806 uint8_t multi_overflow = 0;
807 uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
809 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
810 sizeof(mac_data.entries));
811 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
812 if (s != sizeof(mac_data.entries)) {
813 goto error;
815 iov_discard_front(&iov, &iov_cnt, s);
817 if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
818 goto error;
821 if (mac_data.entries <= MAC_TABLE_ENTRIES) {
822 s = iov_to_buf(iov, iov_cnt, 0, macs,
823 mac_data.entries * ETH_ALEN);
824 if (s != mac_data.entries * ETH_ALEN) {
825 goto error;
827 in_use += mac_data.entries;
828 } else {
829 uni_overflow = 1;
832 iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);
834 first_multi = in_use;
836 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
837 sizeof(mac_data.entries));
838 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
839 if (s != sizeof(mac_data.entries)) {
840 goto error;
843 iov_discard_front(&iov, &iov_cnt, s);
845 if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
846 goto error;
849 if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
850 s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
851 mac_data.entries * ETH_ALEN);
852 if (s != mac_data.entries * ETH_ALEN) {
853 goto error;
855 in_use += mac_data.entries;
856 } else {
857 multi_overflow = 1;
860 n->mac_table.in_use = in_use;
861 n->mac_table.first_multi = first_multi;
862 n->mac_table.uni_overflow = uni_overflow;
863 n->mac_table.multi_overflow = multi_overflow;
864 memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
865 g_free(macs);
866 rxfilter_notify(nc);
868 return VIRTIO_NET_OK;
870 error:
871 g_free(macs);
872 return VIRTIO_NET_ERR;
875 static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
876 struct iovec *iov, unsigned int iov_cnt)
878 VirtIODevice *vdev = VIRTIO_DEVICE(n);
879 uint16_t vid;
880 size_t s;
881 NetClientState *nc = qemu_get_queue(n->nic);
883 s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
884 vid = virtio_lduw_p(vdev, &vid);
885 if (s != sizeof(vid)) {
886 return VIRTIO_NET_ERR;
889 if (vid >= MAX_VLAN)
890 return VIRTIO_NET_ERR;
892 if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
893 n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
894 else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
895 n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
896 else
897 return VIRTIO_NET_ERR;
899 rxfilter_notify(nc);
901 return VIRTIO_NET_OK;
904 static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
905 struct iovec *iov, unsigned int iov_cnt)
907 if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
908 n->status & VIRTIO_NET_S_ANNOUNCE) {
909 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
910 if (n->announce_counter) {
911 timer_mod(n->announce_timer,
912 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
913 self_announce_delay(n->announce_counter));
915 return VIRTIO_NET_OK;
916 } else {
917 return VIRTIO_NET_ERR;
921 static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
922 struct iovec *iov, unsigned int iov_cnt)
924 VirtIODevice *vdev = VIRTIO_DEVICE(n);
925 struct virtio_net_ctrl_mq mq;
926 size_t s;
927 uint16_t queues;
929 s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
930 if (s != sizeof(mq)) {
931 return VIRTIO_NET_ERR;
934 if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
935 return VIRTIO_NET_ERR;
938 queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
940 if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
941 queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
942 queues > n->max_queues ||
943 !n->multiqueue) {
944 return VIRTIO_NET_ERR;
947 n->curr_queues = queues;
948 /* stop the backend before changing the number of queues to avoid handling a
949 * disabled queue */
950 virtio_net_set_status(vdev, vdev->status);
951 virtio_net_set_queues(n);
953 return VIRTIO_NET_OK;
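/* Control virtqueue handler: pop each request, dispatch on the
 * virtio_net_ctrl_hdr class (rx mode, MAC, VLAN, announce, MQ, offloads) and
 * write the one-byte ack status back into the request's in-buffer. */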
956 static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
958 VirtIONet *n = VIRTIO_NET(vdev);
959 struct virtio_net_ctrl_hdr ctrl;
960 virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
961 VirtQueueElement *elem;
962 size_t s;
963 struct iovec *iov, *iov2;
964 unsigned int iov_cnt;
966 for (;;) {
967 elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
968 if (!elem) {
969 break;
971 if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
972 iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
973 virtio_error(vdev, "virtio-net ctrl missing headers");
974 virtqueue_detach_element(vq, elem, 0);
975 g_free(elem);
976 break;
979 iov_cnt = elem->out_num;
980 iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
981 s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
982 iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
983 if (s != sizeof(ctrl)) {
984 status = VIRTIO_NET_ERR;
985 } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
986 status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
987 } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
988 status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
989 } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
990 status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
991 } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
992 status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
993 } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
994 status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
995 } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
996 status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
999 s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
1000 assert(s == sizeof(status));
1002 virtqueue_push(vq, elem, sizeof(status));
1003 virtio_notify(vdev, vq);
1004 g_free(iov2);
1005 g_free(elem);
1009 /* RX */
1011 static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
1013 VirtIONet *n = VIRTIO_NET(vdev);
1014 int queue_index = vq2q(virtio_get_queue_index(vq));
1016 qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
1019 static int virtio_net_can_receive(NetClientState *nc)
1021 VirtIONet *n = qemu_get_nic_opaque(nc);
1022 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1023 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1025 if (!vdev->vm_running) {
1026 return 0;
1029 if (nc->queue_index >= n->curr_queues) {
1030 return 0;
1033 if (!virtio_queue_ready(q->rx_vq) ||
1034 !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1035 return 0;
1038 return 1;
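/* Return whether the RX virtqueue can take 'bufsize' bytes right now;
 * notifications are re-enabled (and the check repeated) before reporting that
 * it cannot, to avoid missing buffers the guest adds concurrently. */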
1041 static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
1043 VirtIONet *n = q->n;
1044 if (virtio_queue_empty(q->rx_vq) ||
1045 (n->mergeable_rx_bufs &&
1046 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1047 virtio_queue_set_notification(q->rx_vq, 1);
1049 /* To avoid a race condition where the guest has made some buffers
1050 * available after the above check but before notification was
1051 * enabled, check for available buffers again.
1053 if (virtio_queue_empty(q->rx_vq) ||
1054 (n->mergeable_rx_bufs &&
1055 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1056 return 0;
1060 virtio_queue_set_notification(q->rx_vq, 0);
1061 return 1;
1064 static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
1066 virtio_tswap16s(vdev, &hdr->hdr_len);
1067 virtio_tswap16s(vdev, &hdr->gso_size);
1068 virtio_tswap16s(vdev, &hdr->csum_start);
1069 virtio_tswap16s(vdev, &hdr->csum_offset);
1072 /* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
1073 * it never finds out that the packets don't have valid checksums. This
1074 * causes dhclient to get upset. Fedora's carried a patch for ages to
1075 * fix this with Xen but it hasn't appeared in an upstream release of
1076 * dhclient yet.
1078 * To avoid breaking existing guests, we catch udp packets and add
1079 * checksums. This is terrible but it's better than hacking the guest
1080 * kernels.
1082 * N.B. if we introduce a zero-copy API, this operation is no longer free so
1083 * we should provide a mechanism to disable it to avoid polluting the host
1084 * cache.
1086 static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
1087 uint8_t *buf, size_t size)
1089 if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
1090 (size > 27 && size < 1500) && /* normal sized MTU */
1091 (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
1092 (buf[23] == 17) && /* ip.protocol == UDP */
1093 (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
1094 net_checksum_calculate(buf, size);
1095 hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
1099 static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
1100 const void *buf, size_t size)
1102 if (n->has_vnet_hdr) {
1103 /* FIXME this cast is evil */
1104 void *wbuf = (void *)buf;
1105 work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
1106 size - n->host_hdr_len);
1108 if (n->needs_vnet_hdr_swap) {
1109 virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
1111 iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
1112 } else {
1113 struct virtio_net_hdr hdr = {
1114 .flags = 0,
1115 .gso_type = VIRTIO_NET_HDR_GSO_NONE
1117 iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
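/* Apply promiscuous/unicast/multicast/broadcast modes, the VLAN table and the
 * MAC filter table; returns non-zero if the packet should be accepted. */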
1121 static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
1123 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1124 static const uint8_t vlan[] = {0x81, 0x00};
1125 uint8_t *ptr = (uint8_t *)buf;
1126 int i;
1128 if (n->promisc)
1129 return 1;
1131 ptr += n->host_hdr_len;
1133 if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
1134 int vid = lduw_be_p(ptr + 14) & 0xfff;
1135 if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
1136 return 0;
1139 if (ptr[0] & 1) { // multicast
1140 if (!memcmp(ptr, bcast, sizeof(bcast))) {
1141 return !n->nobcast;
1142 } else if (n->nomulti) {
1143 return 0;
1144 } else if (n->allmulti || n->mac_table.multi_overflow) {
1145 return 1;
1148 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
1149 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1150 return 1;
1153 } else { // unicast
1154 if (n->nouni) {
1155 return 0;
1156 } else if (n->alluni || n->mac_table.uni_overflow) {
1157 return 1;
1158 } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
1159 return 1;
1162 for (i = 0; i < n->mac_table.first_multi; i++) {
1163 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1164 return 1;
1169 return 0;
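/* RX path proper (runs under rcu_read_lock via virtio_net_receive): copy one
 * packet from the backend into as many RX descriptors as needed, then fix up
 * num_buffers in the first header when mergeable buffers are in use. */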
1172 static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
1173 size_t size)
1175 VirtIONet *n = qemu_get_nic_opaque(nc);
1176 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1177 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1178 struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
1179 struct virtio_net_hdr_mrg_rxbuf mhdr;
1180 unsigned mhdr_cnt = 0;
1181 size_t offset, i, guest_offset;
1183 if (!virtio_net_can_receive(nc)) {
1184 return -1;
1187 /* hdr_len refers to the header we supply to the guest */
1188 if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
1189 return 0;
1192 if (!receive_filter(n, buf, size))
1193 return size;
1195 offset = i = 0;
1197 while (offset < size) {
1198 VirtQueueElement *elem;
1199 int len, total;
1200 const struct iovec *sg;
1202 total = 0;
1204 elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
1205 if (!elem) {
1206 if (i) {
1207 virtio_error(vdev, "virtio-net unexpected empty queue: "
1208 "i %zd mergeable %d offset %zd, size %zd, "
1209 "guest hdr len %zd, host hdr len %zd "
1210 "guest features 0x%" PRIx64,
1211 i, n->mergeable_rx_bufs, offset, size,
1212 n->guest_hdr_len, n->host_hdr_len,
1213 vdev->guest_features);
1215 return -1;
1218 if (elem->in_num < 1) {
1219 virtio_error(vdev,
1220 "virtio-net receive queue contains no in buffers");
1221 virtqueue_detach_element(q->rx_vq, elem, 0);
1222 g_free(elem);
1223 return -1;
1226 sg = elem->in_sg;
1227 if (i == 0) {
1228 assert(offset == 0);
1229 if (n->mergeable_rx_bufs) {
1230 mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
1231 sg, elem->in_num,
1232 offsetof(typeof(mhdr), num_buffers),
1233 sizeof(mhdr.num_buffers));
1236 receive_header(n, sg, elem->in_num, buf, size);
1237 offset = n->host_hdr_len;
1238 total += n->guest_hdr_len;
1239 guest_offset = n->guest_hdr_len;
1240 } else {
1241 guest_offset = 0;
1244 /* copy in packet. ugh */
1245 len = iov_from_buf(sg, elem->in_num, guest_offset,
1246 buf + offset, size - offset);
1247 total += len;
1248 offset += len;
1249 /* If buffers can't be merged, at this point we
1250 * must have consumed the complete packet.
1251 * Otherwise, drop it. */
1252 if (!n->mergeable_rx_bufs && offset < size) {
1253 virtqueue_unpop(q->rx_vq, elem, total);
1254 g_free(elem);
1255 return size;
1258 /* signal other side */
1259 virtqueue_fill(q->rx_vq, elem, total, i++);
1260 g_free(elem);
1263 if (mhdr_cnt) {
1264 virtio_stw_p(vdev, &mhdr.num_buffers, i);
1265 iov_from_buf(mhdr_sg, mhdr_cnt,
1267 &mhdr.num_buffers, sizeof mhdr.num_buffers);
1270 virtqueue_flush(q->rx_vq, i);
1271 virtio_notify(vdev, q->rx_vq);
1273 return size;
1276 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
1277 size_t size)
1279 ssize_t r;
1281 rcu_read_lock();
1282 r = virtio_net_receive_rcu(nc, buf, size);
1283 rcu_read_unlock();
1284 return r;
1287 static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
1289 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
1291 VirtIONet *n = qemu_get_nic_opaque(nc);
1292 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1293 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1295 virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
1296 virtio_notify(vdev, q->tx_vq);
1298 g_free(q->async_tx.elem);
1299 q->async_tx.elem = NULL;
1301 virtio_queue_set_notification(q->tx_vq, 1);
1302 virtio_net_flush_tx(q);
1305 /* TX */
1306 static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
1308 VirtIONet *n = q->n;
1309 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1310 VirtQueueElement *elem;
1311 int32_t num_packets = 0;
1312 int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
1313 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1314 return num_packets;
1317 if (q->async_tx.elem) {
1318 virtio_queue_set_notification(q->tx_vq, 0);
1319 return num_packets;
1322 for (;;) {
1323 ssize_t ret;
1324 unsigned int out_num;
1325 struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
1326 struct virtio_net_hdr_mrg_rxbuf mhdr;
1328 elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
1329 if (!elem) {
1330 break;
1333 out_num = elem->out_num;
1334 out_sg = elem->out_sg;
1335 if (out_num < 1) {
1336 virtio_error(vdev, "virtio-net header not in first element");
1337 virtqueue_detach_element(q->tx_vq, elem, 0);
1338 g_free(elem);
1339 return -EINVAL;
1342 if (n->has_vnet_hdr) {
1343 if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
1344 n->guest_hdr_len) {
1345 virtio_error(vdev, "virtio-net header incorrect");
1346 virtqueue_detach_element(q->tx_vq, elem, 0);
1347 g_free(elem);
1348 return -EINVAL;
1350 if (n->needs_vnet_hdr_swap) {
1351 virtio_net_hdr_swap(vdev, (void *) &mhdr);
1352 sg2[0].iov_base = &mhdr;
1353 sg2[0].iov_len = n->guest_hdr_len;
1354 out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
1355 out_sg, out_num,
1356 n->guest_hdr_len, -1);
1357 if (out_num == VIRTQUEUE_MAX_SIZE) {
1358 goto drop;
1360 out_num += 1;
1361 out_sg = sg2;
1365 * If host wants to see the guest header as is, we can
1366 * pass it on unchanged. Otherwise, copy just the parts
1367 * that host is interested in.
1369 assert(n->host_hdr_len <= n->guest_hdr_len);
1370 if (n->host_hdr_len != n->guest_hdr_len) {
1371 unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
1372 out_sg, out_num,
1373 0, n->host_hdr_len);
1374 sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
1375 out_sg, out_num,
1376 n->guest_hdr_len, -1);
1377 out_num = sg_num;
1378 out_sg = sg;
1381 ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
1382 out_sg, out_num, virtio_net_tx_complete);
1383 if (ret == 0) {
1384 virtio_queue_set_notification(q->tx_vq, 0);
1385 q->async_tx.elem = elem;
1386 return -EBUSY;
1389 drop:
1390 virtqueue_push(q->tx_vq, elem, 0);
1391 virtio_notify(vdev, q->tx_vq);
1392 g_free(elem);
1394 if (++num_packets >= n->tx_burst) {
1395 break;
1398 return num_packets;
1401 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
1403 VirtIONet *n = VIRTIO_NET(vdev);
1404 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
1406 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
1407 virtio_net_drop_tx_queue_data(vdev, vq);
1408 return;
1411 /* This happens when device was stopped but VCPU wasn't. */
1412 if (!vdev->vm_running) {
1413 q->tx_waiting = 1;
1414 return;
1417 if (q->tx_waiting) {
1418 virtio_queue_set_notification(vq, 1);
1419 timer_del(q->tx_timer);
1420 q->tx_waiting = 0;
1421 if (virtio_net_flush_tx(q) == -EINVAL) {
1422 return;
1424 } else {
1425 timer_mod(q->tx_timer,
1426 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
1427 q->tx_waiting = 1;
1428 virtio_queue_set_notification(vq, 0);
1432 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
1434 VirtIONet *n = VIRTIO_NET(vdev);
1435 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
1437 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
1438 virtio_net_drop_tx_queue_data(vdev, vq);
1439 return;
1442 if (unlikely(q->tx_waiting)) {
1443 return;
1445 q->tx_waiting = 1;
1446 /* This happens when device was stopped but VCPU wasn't. */
1447 if (!vdev->vm_running) {
1448 return;
1450 virtio_queue_set_notification(vq, 0);
1451 qemu_bh_schedule(q->tx_bh);
1454 static void virtio_net_tx_timer(void *opaque)
1456 VirtIONetQueue *q = opaque;
1457 VirtIONet *n = q->n;
1458 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1459 /* This happens when the device was stopped but the timer wasn't. */
1460 if (!vdev->vm_running) {
1461 /* Make sure tx waiting is set, so we'll run when restarted. */
1462 assert(q->tx_waiting);
1463 return;
1466 q->tx_waiting = 0;
1468 /* Just in case the driver is not ready any more */
1469 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1470 return;
1473 virtio_queue_set_notification(q->tx_vq, 1);
1474 virtio_net_flush_tx(q);
1477 static void virtio_net_tx_bh(void *opaque)
1479 VirtIONetQueue *q = opaque;
1480 VirtIONet *n = q->n;
1481 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1482 int32_t ret;
1484 /* This happens when device was stopped but BH wasn't. */
1485 if (!vdev->vm_running) {
1486 /* Make sure tx waiting is set, so we'll run when restarted. */
1487 assert(q->tx_waiting);
1488 return;
1491 q->tx_waiting = 0;
1493 /* Just in case the driver is not ready any more */
1494 if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
1495 return;
1498 ret = virtio_net_flush_tx(q);
1499 if (ret == -EBUSY || ret == -EINVAL) {
1500 return; /* Notification re-enable handled by tx_complete or device
1501 * broken */
1504 /* If we flush a full burst of packets, assume there are
1505 * more coming and immediately reschedule */
1506 if (ret >= n->tx_burst) {
1507 qemu_bh_schedule(q->tx_bh);
1508 q->tx_waiting = 1;
1509 return;
1512 /* If less than a full burst, re-enable notification and flush
1513 * anything that may have come in while we weren't looking. If
1514 * we find something, assume the guest is still active and reschedule */
1515 virtio_queue_set_notification(q->tx_vq, 1);
1516 ret = virtio_net_flush_tx(q);
1517 if (ret == -EINVAL) {
1518 return;
1519 } else if (ret > 0) {
1520 virtio_queue_set_notification(q->tx_vq, 0);
1521 qemu_bh_schedule(q->tx_bh);
1522 q->tx_waiting = 1;
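/* Create the rx/tx virtqueue pair for queue 'index', wiring TX either to a
 * timer or to a bottom half depending on the "tx" property. */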
1526 static void virtio_net_add_queue(VirtIONet *n, int index)
1528 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1530 n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
1531 virtio_net_handle_rx);
1533 if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
1534 n->vqs[index].tx_vq =
1535 virtio_add_queue(vdev, n->net_conf.tx_queue_size,
1536 virtio_net_handle_tx_timer);
1537 n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1538 virtio_net_tx_timer,
1539 &n->vqs[index]);
1540 } else {
1541 n->vqs[index].tx_vq =
1542 virtio_add_queue(vdev, n->net_conf.tx_queue_size,
1543 virtio_net_handle_tx_bh);
1544 n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
1547 n->vqs[index].tx_waiting = 0;
1548 n->vqs[index].n = n;
1551 static void virtio_net_del_queue(VirtIONet *n, int index)
1553 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1554 VirtIONetQueue *q = &n->vqs[index];
1555 NetClientState *nc = qemu_get_subqueue(n->nic, index);
1557 qemu_purge_queued_packets(nc);
1559 virtio_del_queue(vdev, index * 2);
1560 if (q->tx_timer) {
1561 timer_del(q->tx_timer);
1562 timer_free(q->tx_timer);
1563 q->tx_timer = NULL;
1564 } else {
1565 qemu_bh_delete(q->tx_bh);
1566 q->tx_bh = NULL;
1568 q->tx_waiting = 0;
1569 virtio_del_queue(vdev, index * 2 + 1);
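/* Grow or shrink the number of virtqueue pairs; the ctrl vq is removed first
 * and re-added last so that it always remains the highest-numbered queue. */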
1572 static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
1574 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1575 int old_num_queues = virtio_get_num_queues(vdev);
1576 int new_num_queues = new_max_queues * 2 + 1;
1577 int i;
1579 assert(old_num_queues >= 3);
1580 assert(old_num_queues % 2 == 1);
1582 if (old_num_queues == new_num_queues) {
1583 return;
1587 * We always need to remove and add ctrl vq if
1588 * old_num_queues != new_num_queues. Remove ctrl_vq first,
589 * and then we only enter one of the following two loops.
1591 virtio_del_queue(vdev, old_num_queues - 1);
1593 for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
1594 /* new_num_queues < old_num_queues */
1595 virtio_net_del_queue(n, i / 2);
1598 for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
1599 /* new_num_queues > old_num_queues */
1600 virtio_net_add_queue(n, i / 2);
1603 /* add ctrl_vq last */
1604 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
1607 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
1609 int max = multiqueue ? n->max_queues : 1;
1611 n->multiqueue = multiqueue;
1612 virtio_net_change_num_queues(n, max);
1614 virtio_net_set_queues(n);
1617 static int virtio_net_post_load_device(void *opaque, int version_id)
1619 VirtIONet *n = opaque;
1620 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1621 int i, link_down;
1623 virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
1624 virtio_vdev_has_feature(vdev,
1625 VIRTIO_F_VERSION_1));
1627 /* MAC_TABLE_ENTRIES may be different from the saved image */
1628 if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
1629 n->mac_table.in_use = 0;
1632 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
1633 n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
1636 if (peer_has_vnet_hdr(n)) {
1637 virtio_net_apply_guest_offloads(n);
1640 virtio_net_set_queues(n);
1642 /* Find the first multicast entry in the saved MAC filter */
1643 for (i = 0; i < n->mac_table.in_use; i++) {
1644 if (n->mac_table.macs[i * ETH_ALEN] & 1) {
1645 break;
1648 n->mac_table.first_multi = i;
1650 /* nc.link_down can't be migrated, so infer link_down according
1651 * to link status bit in n->status */
1652 link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
1653 for (i = 0; i < n->max_queues; i++) {
1654 qemu_get_subqueue(n->nic, i)->link_down = link_down;
1657 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
1658 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
1659 n->announce_counter = SELF_ANNOUNCE_ROUNDS;
1660 timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
1663 return 0;
1666 /* tx_waiting field of a VirtIONetQueue */
1667 static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
1668 .name = "virtio-net-queue-tx_waiting",
1669 .fields = (VMStateField[]) {
1670 VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
1671 VMSTATE_END_OF_LIST()
1675 static bool max_queues_gt_1(void *opaque, int version_id)
1677 return VIRTIO_NET(opaque)->max_queues > 1;
1680 static bool has_ctrl_guest_offloads(void *opaque, int version_id)
1682 return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
1683 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
1686 static bool mac_table_fits(void *opaque, int version_id)
1688 return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
1691 static bool mac_table_doesnt_fit(void *opaque, int version_id)
1693 return !mac_table_fits(opaque, version_id);
1696 /* This temporary type is shared by all the WITH_TMP methods
1697 * although only some fields are used by each.
1699 struct VirtIONetMigTmp {
1700 VirtIONet *parent;
1701 VirtIONetQueue *vqs_1;
1702 uint16_t curr_queues_1;
1703 uint8_t has_ufo;
1704 uint32_t has_vnet_hdr;
1707 /* The 2nd and subsequent tx_waiting flags are loaded later than
1708 * the 1st entry in the queues and only if there's more than one
1709 * entry. We use the tmp mechanism to calculate a temporary
1710 * pointer and count and also validate the count.
1713 static void virtio_net_tx_waiting_pre_save(void *opaque)
1715 struct VirtIONetMigTmp *tmp = opaque;
1717 tmp->vqs_1 = tmp->parent->vqs + 1;
1718 tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
1719 if (tmp->parent->curr_queues == 0) {
1720 tmp->curr_queues_1 = 0;
1724 static int virtio_net_tx_waiting_pre_load(void *opaque)
1726 struct VirtIONetMigTmp *tmp = opaque;
1728 /* Reuse the pointer setup from save */
1729 virtio_net_tx_waiting_pre_save(opaque);
1731 if (tmp->parent->curr_queues > tmp->parent->max_queues) {
1732 error_report("virtio-net: curr_queues %x > max_queues %x",
1733 tmp->parent->curr_queues, tmp->parent->max_queues);
1735 return -EINVAL;
1738 return 0; /* all good */
1741 static const VMStateDescription vmstate_virtio_net_tx_waiting = {
1742 .name = "virtio-net-tx_waiting",
1743 .pre_load = virtio_net_tx_waiting_pre_load,
1744 .pre_save = virtio_net_tx_waiting_pre_save,
1745 .fields = (VMStateField[]) {
1746 VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
1747 curr_queues_1,
1748 vmstate_virtio_net_queue_tx_waiting,
1749 struct VirtIONetQueue),
1750 VMSTATE_END_OF_LIST()
1754 /* the 'has_ufo' flag is just tested; if the incoming stream has the
1755 * flag set we need to check that we have it
1757 static int virtio_net_ufo_post_load(void *opaque, int version_id)
1759 struct VirtIONetMigTmp *tmp = opaque;
1761 if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
1762 error_report("virtio-net: saved image requires TUN_F_UFO support");
1763 return -EINVAL;
1766 return 0;
1769 static void virtio_net_ufo_pre_save(void *opaque)
1771 struct VirtIONetMigTmp *tmp = opaque;
1773 tmp->has_ufo = tmp->parent->has_ufo;
1776 static const VMStateDescription vmstate_virtio_net_has_ufo = {
1777 .name = "virtio-net-ufo",
1778 .post_load = virtio_net_ufo_post_load,
1779 .pre_save = virtio_net_ufo_pre_save,
1780 .fields = (VMStateField[]) {
1781 VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
1782 VMSTATE_END_OF_LIST()
1786 /* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
1787 * flag set we need to check that we have it
1789 static int virtio_net_vnet_post_load(void *opaque, int version_id)
1791 struct VirtIONetMigTmp *tmp = opaque;
1793 if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
1794 error_report("virtio-net: saved image requires vnet_hdr=on");
1795 return -EINVAL;
1798 return 0;
1801 static void virtio_net_vnet_pre_save(void *opaque)
1803 struct VirtIONetMigTmp *tmp = opaque;
1805 tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
1808 static const VMStateDescription vmstate_virtio_net_has_vnet = {
1809 .name = "virtio-net-vnet",
1810 .post_load = virtio_net_vnet_post_load,
1811 .pre_save = virtio_net_vnet_pre_save,
1812 .fields = (VMStateField[]) {
1813 VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
1814 VMSTATE_END_OF_LIST()
1818 static const VMStateDescription vmstate_virtio_net_device = {
1819 .name = "virtio-net-device",
1820 .version_id = VIRTIO_NET_VM_VERSION,
1821 .minimum_version_id = VIRTIO_NET_VM_VERSION,
1822 .post_load = virtio_net_post_load_device,
1823 .fields = (VMStateField[]) {
1824 VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
1825 VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
1826 vmstate_virtio_net_queue_tx_waiting,
1827 VirtIONetQueue),
1828 VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
1829 VMSTATE_UINT16(status, VirtIONet),
1830 VMSTATE_UINT8(promisc, VirtIONet),
1831 VMSTATE_UINT8(allmulti, VirtIONet),
1832 VMSTATE_UINT32(mac_table.in_use, VirtIONet),
1834 /* Guarded pair: If it fits we load it, else we throw it away
835 * - can happen if source has a larger MAC table; post-load
1836 * sets flags in this case.
1838 VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
1839 0, mac_table_fits, mac_table.in_use,
1840 ETH_ALEN),
1841 VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
1842 mac_table.in_use, ETH_ALEN),
1844 /* Note: This is an array of uint32's that's always been saved as a
1845 * buffer; hold onto your endiannesses; it's actually used as a bitmap
1846 * but based on the uint.
1848 VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
1849 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
1850 vmstate_virtio_net_has_vnet),
1851 VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
1852 VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
1853 VMSTATE_UINT8(alluni, VirtIONet),
1854 VMSTATE_UINT8(nomulti, VirtIONet),
1855 VMSTATE_UINT8(nouni, VirtIONet),
1856 VMSTATE_UINT8(nobcast, VirtIONet),
1857 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
1858 vmstate_virtio_net_has_ufo),
1859 VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
1860 vmstate_info_uint16_equal, uint16_t),
1861 VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
1862 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
1863 vmstate_virtio_net_tx_waiting),
1864 VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
1865 has_ctrl_guest_offloads),
1866 VMSTATE_END_OF_LIST()
1870 static NetClientInfo net_virtio_info = {
1871 .type = NET_CLIENT_DRIVER_NIC,
1872 .size = sizeof(NICState),
1873 .can_receive = virtio_net_can_receive,
1874 .receive = virtio_net_receive,
1875 .link_status_changed = virtio_net_set_link_status,
1876 .query_rx_filter = virtio_net_query_rxfilter,
1879 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
1881 VirtIONet *n = VIRTIO_NET(vdev);
1882 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
1883 assert(n->vhost_started);
1884 return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
1887 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
1888 bool mask)
1890 VirtIONet *n = VIRTIO_NET(vdev);
1891 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
1892 assert(n->vhost_started);
1893 vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
1894 vdev, idx, mask);
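/* The config space ends after the last field whose feature bit is set in
 * host_features (see feature_sizes); VIRTIO_NET_F_MAC is always included. */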
1897 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
1899 int i, config_size = 0;
1900 virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
1902 for (i = 0; feature_sizes[i].flags != 0; i++) {
1903 if (host_features & feature_sizes[i].flags) {
1904 config_size = MAX(feature_sizes[i].end, config_size);
1907 n->config_size = config_size;
1910 void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
1911 const char *type)
1914 * The name can be NULL; the netclient name will then be type.x.
1916 assert(type != NULL);
1918 g_free(n->netclient_name);
1919 g_free(n->netclient_type);
1920 n->netclient_name = g_strdup(name);
1921 n->netclient_type = g_strdup(type);
1924 static void virtio_net_device_realize(DeviceState *dev, Error **errp)
1926 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
1927 VirtIONet *n = VIRTIO_NET(dev);
1928 NetClientState *nc;
1929 int i;
1931 if (n->net_conf.mtu) {
1932 n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
1935 virtio_net_set_config_size(n, n->host_features);
1936 virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
1939 * We set a lower limit on RX queue size to what it always was.
1940 * Guests that want a smaller ring can always resize it without
1941 * help from us (using virtio 1 and up).
1943 if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
1944 n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
1945 !is_power_of_2(n->net_conf.rx_queue_size)) {
1946 error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
1947 "must be a power of 2 between %d and %d.",
1948 n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
1949 VIRTQUEUE_MAX_SIZE);
1950 virtio_cleanup(vdev);
1951 return;
1954 if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
1955 n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
1956 !is_power_of_2(n->net_conf.tx_queue_size)) {
1957 error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
1958 "must be a power of 2 between %d and %d",
1959 n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
1960 VIRTQUEUE_MAX_SIZE);
1961 virtio_cleanup(vdev);
1962 return;
1965 n->max_queues = MAX(n->nic_conf.peers.queues, 1);
1966 if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
1967 error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
1968 "must be a positive integer less than %d.",
1969 n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
1970 virtio_cleanup(vdev);
1971 return;
1973 n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
1974 n->curr_queues = 1;
1975 n->tx_timeout = n->net_conf.txtimer;
1977 if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
1978 && strcmp(n->net_conf.tx, "bh")) {
1979 error_report("virtio-net: "
1980 "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
1981 n->net_conf.tx);
1982 error_report("Defaulting to \"bh\"");
1985 n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
1986 n->net_conf.tx_queue_size);
1988 for (i = 0; i < n->max_queues; i++) {
1989 virtio_net_add_queue(n, i);
1992 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
1993 qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
1994 memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
1995 n->status = VIRTIO_NET_S_LINK_UP;
1996 n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
1997 virtio_net_announce_timer, n);
1999 if (n->netclient_type) {
2001 * Happens when virtio_net_set_netclient_name has been called.
2003 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
2004 n->netclient_type, n->netclient_name, n);
2005 } else {
2006 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
2007 object_get_typename(OBJECT(dev)), dev->id, n);
2010 peer_test_vnet_hdr(n);
2011 if (peer_has_vnet_hdr(n)) {
2012 for (i = 0; i < n->max_queues; i++) {
2013 qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
2015 n->host_hdr_len = sizeof(struct virtio_net_hdr);
2016 } else {
2017 n->host_hdr_len = 0;
2020 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);
2022 n->vqs[0].tx_waiting = 0;
2023 n->tx_burst = n->net_conf.txburst;
2024 virtio_net_set_mrg_rx_bufs(n, 0, 0);
2025 n->promisc = 1; /* for compatibility */
2027 n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
2029 n->vlans = g_malloc0(MAX_VLAN >> 3);
2031 nc = qemu_get_queue(n->nic);
2032 nc->rxfilter_notify_enabled = 1;
2034 n->qdev = dev;
2037 static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
2039 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2040 VirtIONet *n = VIRTIO_NET(dev);
2041 int i, max_queues;
2043 /* This will stop vhost backend if appropriate. */
2044 virtio_net_set_status(vdev, 0);
2046 g_free(n->netclient_name);
2047 n->netclient_name = NULL;
2048 g_free(n->netclient_type);
2049 n->netclient_type = NULL;
2051 g_free(n->mac_table.macs);
2052 g_free(n->vlans);
2054 max_queues = n->multiqueue ? n->max_queues : 1;
2055 for (i = 0; i < max_queues; i++) {
2056 virtio_net_del_queue(n, i);
2059 timer_del(n->announce_timer);
2060 timer_free(n->announce_timer);
2061 g_free(n->vqs);
2062 qemu_del_nic(n->nic);
2063 virtio_cleanup(vdev);
2066 static void virtio_net_instance_init(Object *obj)
2068 VirtIONet *n = VIRTIO_NET(obj);
2071 * The default config_size is sizeof(struct virtio_net_config).
2072 * Can be overridden with virtio_net_set_config_size.
2074 n->config_size = sizeof(struct virtio_net_config);
2075 device_add_bootindex_property(obj, &n->nic_conf.bootindex,
2076 "bootindex", "/ethernet-phy@0",
2077 DEVICE(n), NULL);
2080 static void virtio_net_pre_save(void *opaque)
2082 VirtIONet *n = opaque;
2084 /* At this point, backend must be stopped, otherwise
2085 * it might keep writing to memory. */
2086 assert(!n->vhost_started);
2089 static const VMStateDescription vmstate_virtio_net = {
2090 .name = "virtio-net",
2091 .minimum_version_id = VIRTIO_NET_VM_VERSION,
2092 .version_id = VIRTIO_NET_VM_VERSION,
2093 .fields = (VMStateField[]) {
2094 VMSTATE_VIRTIO_DEVICE,
2095 VMSTATE_END_OF_LIST()
2097 .pre_save = virtio_net_pre_save,
2100 static Property virtio_net_properties[] = {
2101 DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
2102 DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
2103 VIRTIO_NET_F_GUEST_CSUM, true),
2104 DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
2105 DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features,
2106 VIRTIO_NET_F_GUEST_TSO4, true),
2107 DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features,
2108 VIRTIO_NET_F_GUEST_TSO6, true),
2109 DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features,
2110 VIRTIO_NET_F_GUEST_ECN, true),
2111 DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features,
2112 VIRTIO_NET_F_GUEST_UFO, true),
2113 DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features,
2114 VIRTIO_NET_F_GUEST_ANNOUNCE, true),
2115 DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features,
2116 VIRTIO_NET_F_HOST_TSO4, true),
2117 DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features,
2118 VIRTIO_NET_F_HOST_TSO6, true),
2119 DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features,
2120 VIRTIO_NET_F_HOST_ECN, true),
2121 DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features,
2122 VIRTIO_NET_F_HOST_UFO, true),
2123 DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features,
2124 VIRTIO_NET_F_MRG_RXBUF, true),
2125 DEFINE_PROP_BIT("status", VirtIONet, host_features,
2126 VIRTIO_NET_F_STATUS, true),
2127 DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features,
2128 VIRTIO_NET_F_CTRL_VQ, true),
2129 DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features,
2130 VIRTIO_NET_F_CTRL_RX, true),
2131 DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features,
2132 VIRTIO_NET_F_CTRL_VLAN, true),
2133 DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features,
2134 VIRTIO_NET_F_CTRL_RX_EXTRA, true),
2135 DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features,
2136 VIRTIO_NET_F_CTRL_MAC_ADDR, true),
2137 DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features,
2138 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
2139 DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
2140 DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
2141 DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
2142 TX_TIMER_INTERVAL),
2143 DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
2144 DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
2145 DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
2146 VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
2147 DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
2148 VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
2149 DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
2150 DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
2151 true),
2152 DEFINE_PROP_END_OF_LIST(),
2155 static void virtio_net_class_init(ObjectClass *klass, void *data)
2157 DeviceClass *dc = DEVICE_CLASS(klass);
2158 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
2160 dc->props = virtio_net_properties;
2161 dc->vmsd = &vmstate_virtio_net;
2162 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
2163 vdc->realize = virtio_net_device_realize;
2164 vdc->unrealize = virtio_net_device_unrealize;
2165 vdc->get_config = virtio_net_get_config;
2166 vdc->set_config = virtio_net_set_config;
2167 vdc->get_features = virtio_net_get_features;
2168 vdc->set_features = virtio_net_set_features;
2169 vdc->bad_features = virtio_net_bad_features;
2170 vdc->reset = virtio_net_reset;
2171 vdc->set_status = virtio_net_set_status;
2172 vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
2173 vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
2174 vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
2175 vdc->vmsd = &vmstate_virtio_net_device;
2178 static const TypeInfo virtio_net_info = {
2179 .name = TYPE_VIRTIO_NET,
2180 .parent = TYPE_VIRTIO_DEVICE,
2181 .instance_size = sizeof(VirtIONet),
2182 .instance_init = virtio_net_instance_init,
2183 .class_init = virtio_net_class_init,
2186 static void virtio_register_types(void)
2188 type_register_static(&virtio_net_info);
2191 type_init(virtio_register_types)