hw/net/virtio-net.c
1 /*
2 * Virtio Network Device
4 * Copyright IBM, Corp. 2007
6 * Authors:
7 * Anthony Liguori <aliguori@us.ibm.com>
9 * This work is licensed under the terms of the GNU GPL, version 2. See
10 * the COPYING file in the top-level directory.
 */
14 #include "qemu/osdep.h"
15 #include "qemu/iov.h"
16 #include "hw/virtio/virtio.h"
17 #include "net/net.h"
18 #include "net/checksum.h"
19 #include "net/tap.h"
20 #include "qemu/error-report.h"
21 #include "qemu/timer.h"
22 #include "hw/virtio/virtio-net.h"
23 #include "net/vhost_net.h"
24 #include "hw/virtio/virtio-bus.h"
25 #include "qapi/qmp/qjson.h"
26 #include "qapi-event.h"
27 #include "hw/virtio/virtio-access.h"
28 #include "migration/misc.h"
30 #define VIRTIO_NET_VM_VERSION 11
32 #define MAC_TABLE_ENTRIES 64
33 #define MAX_VLAN (1 << 12) /* Per 802.1Q definition */
35 /* previously fixed value */
36 #define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
37 #define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256
39 /* for now, only allow larger queues; with virtio-1, guest can downsize */
40 #define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
41 #define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE
44 * Calculate the number of bytes up to and including the given 'field' of
45 * 'container'.
47 #define endof(container, field) \
48 (offsetof(container, field) + sizeof(((container *)0)->field))
50 typedef struct VirtIOFeature {
51 uint32_t flags;
52 size_t end;
53 } VirtIOFeature;
55 static VirtIOFeature feature_sizes[] = {
56 {.flags = 1 << VIRTIO_NET_F_MAC,
57 .end = endof(struct virtio_net_config, mac)},
58 {.flags = 1 << VIRTIO_NET_F_STATUS,
59 .end = endof(struct virtio_net_config, status)},
60 {.flags = 1 << VIRTIO_NET_F_MQ,
61 .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
62 {.flags = 1 << VIRTIO_NET_F_MTU,
63 .end = endof(struct virtio_net_config, mtu)},
67 static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
69 VirtIONet *n = qemu_get_nic_opaque(nc);
71 return &n->vqs[nc->queue_index];
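/* Each queue pair uses two consecutive virtqueues (RX, then TX), so the
 * pair index is the virtqueue index divided by two. */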
74 static int vq2q(int queue_index)
76 return queue_index / 2;
79 /* TODO
80 * - we could suppress RX interrupt if we were so inclined.
83 static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
85 VirtIONet *n = VIRTIO_NET(vdev);
86 struct virtio_net_config netcfg;
88 virtio_stw_p(vdev, &netcfg.status, n->status);
89 virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
90 virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
91 memcpy(netcfg.mac, n->mac, ETH_ALEN);
92 memcpy(config, &netcfg, n->config_size);
95 static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
97 VirtIONet *n = VIRTIO_NET(vdev);
98 struct virtio_net_config netcfg = {};
100 memcpy(&netcfg, config, n->config_size);
102 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
103 !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
104 memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
105 memcpy(n->mac, netcfg.mac, ETH_ALEN);
106 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
110 static bool virtio_net_started(VirtIONet *n, uint8_t status)
112 VirtIODevice *vdev = VIRTIO_DEVICE(n);
113 return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
114 (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
117 static void virtio_net_announce_timer(void *opaque)
119 VirtIONet *n = opaque;
120 VirtIODevice *vdev = VIRTIO_DEVICE(n);
122 n->announce_counter--;
123 n->status |= VIRTIO_NET_S_ANNOUNCE;
124 virtio_notify_config(vdev);
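/* Start or stop the vhost backend so that it matches the driver and link
 * state.  If vhost cannot be used (e.g. the backend would need vnet header
 * swapping), keep using the userspace virtio path instead. */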
127 static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
129 VirtIODevice *vdev = VIRTIO_DEVICE(n);
130 NetClientState *nc = qemu_get_queue(n->nic);
131 int queues = n->multiqueue ? n->max_queues : 1;
133 if (!get_vhost_net(nc->peer)) {
134 return;
137 if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
138 !!n->vhost_started) {
139 return;
141 if (!n->vhost_started) {
142 int r, i;
144 if (n->needs_vnet_hdr_swap) {
145 error_report("backend does not support %s vnet headers; "
146 "falling back on userspace virtio",
147 virtio_is_big_endian(vdev) ? "BE" : "LE");
148 return;
151 /* Any packets outstanding? Purge them to avoid touching rings
152 * when vhost is running.
154 for (i = 0; i < queues; i++) {
155 NetClientState *qnc = qemu_get_subqueue(n->nic, i);
157 /* Purge both directions: TX and RX. */
158 qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
159 qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
162 if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
163 r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
164 if (r < 0) {
165 error_report("%uBytes MTU not supported by the backend",
166 n->net_conf.mtu);
168 return;
172 n->vhost_started = 1;
173 r = vhost_net_start(vdev, n->nic->ncs, queues);
174 if (r < 0) {
175 error_report("unable to start vhost net: %d: "
176 "falling back on userspace virtio", -r);
177 n->vhost_started = 0;
179 } else {
180 vhost_net_stop(vdev, n->nic->ncs, queues);
181 n->vhost_started = 0;
185 static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
186 NetClientState *peer,
187 bool enable)
189 if (virtio_is_big_endian(vdev)) {
190 return qemu_set_vnet_be(peer, enable);
191 } else {
192 return qemu_set_vnet_le(peer, enable);
196 static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
197 int queues, bool enable)
199 int i;
201 for (i = 0; i < queues; i++) {
202 if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
203 enable) {
204 while (--i >= 0) {
205 virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
208 return true;
212 return false;
215 static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
217 VirtIODevice *vdev = VIRTIO_DEVICE(n);
218 int queues = n->multiqueue ? n->max_queues : 1;
220 if (virtio_net_started(n, status)) {
221 /* Before using the device, we tell the network backend about the
222 * endianness to use when parsing vnet headers. If the backend
223 * can't do it, we fall back to fixing the headers in the core
224 * virtio-net code.
226 n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
227 queues, true);
228 } else if (virtio_net_started(n, vdev->status)) {
229 /* After using the device, we need to reset the network backend to
230 * the default (guest native endianness), otherwise the guest may
231 * lose network connectivity if it is rebooted into a different
232 * endianness.
234 virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
238 static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
240 unsigned int dropped = virtqueue_drop_all(vq);
241 if (dropped) {
242 virtio_notify(vdev, vq);
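/* Propagate a device status change to every queue: flush pending RX,
 * arm or cancel the TX timer/bottom half, and drop queued TX data when
 * the link is down while the driver is still marked OK. */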
246 static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
248 VirtIONet *n = VIRTIO_NET(vdev);
249 VirtIONetQueue *q;
250 int i;
251 uint8_t queue_status;
253 virtio_net_vnet_endian_status(n, status);
254 virtio_net_vhost_status(n, status);
256 for (i = 0; i < n->max_queues; i++) {
257 NetClientState *ncs = qemu_get_subqueue(n->nic, i);
258 bool queue_started;
259 q = &n->vqs[i];
261 if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
262 queue_status = 0;
263 } else {
264 queue_status = status;
266 queue_started =
267 virtio_net_started(n, queue_status) && !n->vhost_started;
269 if (queue_started) {
270 qemu_flush_queued_packets(ncs);
273 if (!q->tx_waiting) {
274 continue;
277 if (queue_started) {
278 if (q->tx_timer) {
279 timer_mod(q->tx_timer,
280 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
281 } else {
282 qemu_bh_schedule(q->tx_bh);
284 } else {
285 if (q->tx_timer) {
286 timer_del(q->tx_timer);
287 } else {
288 qemu_bh_cancel(q->tx_bh);
290 if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
291 (queue_status & VIRTIO_CONFIG_S_DRIVER_OK)) {
292 /* if tx is waiting we likely have some packets in the tx queue
293 * and notification was disabled */
294 q->tx_waiting = 0;
295 virtio_queue_set_notification(q->tx_vq, 1);
296 virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
302 static void virtio_net_set_link_status(NetClientState *nc)
304 VirtIONet *n = qemu_get_nic_opaque(nc);
305 VirtIODevice *vdev = VIRTIO_DEVICE(n);
306 uint16_t old_status = n->status;
308 if (nc->link_down)
309 n->status &= ~VIRTIO_NET_S_LINK_UP;
310 else
311 n->status |= VIRTIO_NET_S_LINK_UP;
313 if (n->status != old_status)
314 virtio_notify_config(vdev);
316 virtio_net_set_status(vdev, vdev->status);
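/* Emit NIC_RX_FILTER_CHANGED once after a filter change; further events are
 * suppressed until the filter is queried again (virtio_net_query_rxfilter). */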
319 static void rxfilter_notify(NetClientState *nc)
321 VirtIONet *n = qemu_get_nic_opaque(nc);
323 if (nc->rxfilter_notify_enabled) {
324 gchar *path = object_get_canonical_path(OBJECT(n->qdev));
325 qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
326 n->netclient_name, path, &error_abort);
327 g_free(path);
329 /* disable event notification to avoid a flood of events */
330 nc->rxfilter_notify_enabled = 0;
334 static intList *get_vlan_table(VirtIONet *n)
336 intList *list, *entry;
337 int i, j;
339 list = NULL;
340 for (i = 0; i < MAX_VLAN >> 5; i++) {
341 for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
342 if (n->vlans[i] & (1U << j)) {
343 entry = g_malloc0(sizeof(*entry));
344 entry->value = (i << 5) + j;
345 entry->next = list;
346 list = entry;
351 return list;
354 static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
356 VirtIONet *n = qemu_get_nic_opaque(nc);
357 VirtIODevice *vdev = VIRTIO_DEVICE(n);
358 RxFilterInfo *info;
359 strList *str_list, *entry;
360 int i;
362 info = g_malloc0(sizeof(*info));
363 info->name = g_strdup(nc->name);
364 info->promiscuous = n->promisc;
366 if (n->nouni) {
367 info->unicast = RX_STATE_NONE;
368 } else if (n->alluni) {
369 info->unicast = RX_STATE_ALL;
370 } else {
371 info->unicast = RX_STATE_NORMAL;
374 if (n->nomulti) {
375 info->multicast = RX_STATE_NONE;
376 } else if (n->allmulti) {
377 info->multicast = RX_STATE_ALL;
378 } else {
379 info->multicast = RX_STATE_NORMAL;
382 info->broadcast_allowed = n->nobcast;
383 info->multicast_overflow = n->mac_table.multi_overflow;
384 info->unicast_overflow = n->mac_table.uni_overflow;
386 info->main_mac = qemu_mac_strdup_printf(n->mac);
388 str_list = NULL;
389 for (i = 0; i < n->mac_table.first_multi; i++) {
390 entry = g_malloc0(sizeof(*entry));
391 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
392 entry->next = str_list;
393 str_list = entry;
395 info->unicast_table = str_list;
397 str_list = NULL;
398 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
399 entry = g_malloc0(sizeof(*entry));
400 entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
401 entry->next = str_list;
402 str_list = entry;
404 info->multicast_table = str_list;
405 info->vlan_table = get_vlan_table(n);
407 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
408 info->vlan = RX_STATE_ALL;
409 } else if (!info->vlan_table) {
410 info->vlan = RX_STATE_NONE;
411 } else {
412 info->vlan = RX_STATE_NORMAL;
415 /* enable event notification after query */
416 nc->rxfilter_notify_enabled = 1;
418 return info;
421 static void virtio_net_reset(VirtIODevice *vdev)
423 VirtIONet *n = VIRTIO_NET(vdev);
425 /* Reset back to compatibility mode */
426 n->promisc = 1;
427 n->allmulti = 0;
428 n->alluni = 0;
429 n->nomulti = 0;
430 n->nouni = 0;
431 n->nobcast = 0;
432 /* multiqueue is disabled by default */
433 n->curr_queues = 1;
434 timer_del(n->announce_timer);
435 n->announce_counter = 0;
436 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
438 /* Flush any MAC and VLAN filter table state */
439 n->mac_table.in_use = 0;
440 n->mac_table.first_multi = 0;
441 n->mac_table.multi_overflow = 0;
442 n->mac_table.uni_overflow = 0;
443 memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
444 memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
445 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
446 memset(n->vlans, 0, MAX_VLAN >> 3);
449 static void peer_test_vnet_hdr(VirtIONet *n)
451 NetClientState *nc = qemu_get_queue(n->nic);
452 if (!nc->peer) {
453 return;
456 n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
459 static int peer_has_vnet_hdr(VirtIONet *n)
461 return n->has_vnet_hdr;
464 static int peer_has_ufo(VirtIONet *n)
466 if (!peer_has_vnet_hdr(n))
467 return 0;
469 n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);
471 return n->has_ufo;
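/* Recompute the vnet header length exposed to the guest (and, where the peer
 * supports it, to the backend) based on mergeable RX buffers and VERSION_1. */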
474 static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
475 int version_1)
477 int i;
478 NetClientState *nc;
480 n->mergeable_rx_bufs = mergeable_rx_bufs;
482 if (version_1) {
483 n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
484 } else {
485 n->guest_hdr_len = n->mergeable_rx_bufs ?
486 sizeof(struct virtio_net_hdr_mrg_rxbuf) :
487 sizeof(struct virtio_net_hdr);
490 for (i = 0; i < n->max_queues; i++) {
491 nc = qemu_get_subqueue(n->nic, i);
493 if (peer_has_vnet_hdr(n) &&
494 qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
495 qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
496 n->host_hdr_len = n->guest_hdr_len;
501 static int virtio_net_max_tx_queue_size(VirtIONet *n)
503 NetClientState *peer = n->nic_conf.peers.ncs[0];
506 * Backends other than vhost-user don't support max queue size.
508 if (!peer) {
509 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
512 if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
513 return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
516 return VIRTQUEUE_MAX_SIZE;
519 static int peer_attach(VirtIONet *n, int index)
521 NetClientState *nc = qemu_get_subqueue(n->nic, index);
523 if (!nc->peer) {
524 return 0;
527 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
528 vhost_set_vring_enable(nc->peer, 1);
531 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
532 return 0;
535 if (n->max_queues == 1) {
536 return 0;
539 return tap_enable(nc->peer);
542 static int peer_detach(VirtIONet *n, int index)
544 NetClientState *nc = qemu_get_subqueue(n->nic, index);
546 if (!nc->peer) {
547 return 0;
550 if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
551 vhost_set_vring_enable(nc->peer, 0);
554 if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
555 return 0;
558 return tap_disable(nc->peer);
561 static void virtio_net_set_queues(VirtIONet *n)
563 int i;
564 int r;
566 if (n->nic->peer_deleted) {
567 return;
570 for (i = 0; i < n->max_queues; i++) {
571 if (i < n->curr_queues) {
572 r = peer_attach(n, i);
573 assert(!r);
574 } else {
575 r = peer_detach(n, i);
576 assert(!r);
581 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
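/* Offer only the features the peer backend can actually support: clear the
 * checksum/TSO/UFO offloads it lacks and let vhost mask the rest. */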
583 static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
584 Error **errp)
586 VirtIONet *n = VIRTIO_NET(vdev);
587 NetClientState *nc = qemu_get_queue(n->nic);
589 /* First, sync all possibly supported virtio-net features */
590 features |= n->host_features;
592 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
594 if (!peer_has_vnet_hdr(n)) {
595 virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
596 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
597 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
598 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);
600 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
601 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
602 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
603 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
606 if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
607 virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
608 virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
611 if (!get_vhost_net(nc->peer)) {
612 return features;
614 features = vhost_net_get_features(get_vhost_net(nc->peer), features);
615 vdev->backend_features = features;
617 if (n->mtu_bypass_backend &&
618 (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
619 features |= (1ULL << VIRTIO_NET_F_MTU);
622 return features;
625 static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
627 uint64_t features = 0;
629 /* Linux kernel 2.6.25. It understood MAC (as everyone must),
630 * but also these: */
631 virtio_add_feature(&features, VIRTIO_NET_F_MAC);
632 virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
633 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
634 virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
635 virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);
637 return features;
640 static void virtio_net_apply_guest_offloads(VirtIONet *n)
642 qemu_set_offload(qemu_get_queue(n->nic)->peer,
643 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
644 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
645 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
646 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
647 !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
650 static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
652 static const uint64_t guest_offloads_mask =
653 (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
654 (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
655 (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
656 (1ULL << VIRTIO_NET_F_GUEST_ECN) |
657 (1ULL << VIRTIO_NET_F_GUEST_UFO);
659 return guest_offloads_mask & features;
662 static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
664 VirtIODevice *vdev = VIRTIO_DEVICE(n);
665 return virtio_net_guest_offloads_by_features(vdev->guest_features);
668 static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
670 VirtIONet *n = VIRTIO_NET(vdev);
671 int i;
673 if (n->mtu_bypass_backend &&
674 !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
675 features &= ~(1ULL << VIRTIO_NET_F_MTU);
678 virtio_net_set_multiqueue(n,
679 virtio_has_feature(features, VIRTIO_NET_F_MQ));
681 virtio_net_set_mrg_rx_bufs(n,
682 virtio_has_feature(features,
683 VIRTIO_NET_F_MRG_RXBUF),
684 virtio_has_feature(features,
685 VIRTIO_F_VERSION_1));
687 if (n->has_vnet_hdr) {
688 n->curr_guest_offloads =
689 virtio_net_guest_offloads_by_features(features);
690 virtio_net_apply_guest_offloads(n);
693 for (i = 0; i < n->max_queues; i++) {
694 NetClientState *nc = qemu_get_subqueue(n->nic, i);
696 if (!get_vhost_net(nc->peer)) {
697 continue;
699 vhost_net_ack_features(get_vhost_net(nc->peer), features);
702 if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
703 memset(n->vlans, 0, MAX_VLAN >> 3);
704 } else {
705 memset(n->vlans, 0xff, MAX_VLAN >> 3);
709 static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
710 struct iovec *iov, unsigned int iov_cnt)
712 uint8_t on;
713 size_t s;
714 NetClientState *nc = qemu_get_queue(n->nic);
716 s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
717 if (s != sizeof(on)) {
718 return VIRTIO_NET_ERR;
721 if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
722 n->promisc = on;
723 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
724 n->allmulti = on;
725 } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
726 n->alluni = on;
727 } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
728 n->nomulti = on;
729 } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
730 n->nouni = on;
731 } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
732 n->nobcast = on;
733 } else {
734 return VIRTIO_NET_ERR;
737 rxfilter_notify(nc);
739 return VIRTIO_NET_OK;
742 static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
743 struct iovec *iov, unsigned int iov_cnt)
745 VirtIODevice *vdev = VIRTIO_DEVICE(n);
746 uint64_t offloads;
747 size_t s;
749 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
750 return VIRTIO_NET_ERR;
753 s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
754 if (s != sizeof(offloads)) {
755 return VIRTIO_NET_ERR;
758 if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
759 uint64_t supported_offloads;
761 offloads = virtio_ldq_p(vdev, &offloads);
763 if (!n->has_vnet_hdr) {
764 return VIRTIO_NET_ERR;
767 supported_offloads = virtio_net_supported_guest_offloads(n);
768 if (offloads & ~supported_offloads) {
769 return VIRTIO_NET_ERR;
772 n->curr_guest_offloads = offloads;
773 virtio_net_apply_guest_offloads(n);
775 return VIRTIO_NET_OK;
776 } else {
777 return VIRTIO_NET_ERR;
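/* CTRL_MAC commands: set the primary MAC address or replace the unicast and
 * multicast filter tables supplied by the guest. */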
781 static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
782 struct iovec *iov, unsigned int iov_cnt)
784 VirtIODevice *vdev = VIRTIO_DEVICE(n);
785 struct virtio_net_ctrl_mac mac_data;
786 size_t s;
787 NetClientState *nc = qemu_get_queue(n->nic);
789 if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
790 if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
791 return VIRTIO_NET_ERR;
793 s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
794 assert(s == sizeof(n->mac));
795 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
796 rxfilter_notify(nc);
798 return VIRTIO_NET_OK;
801 if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
802 return VIRTIO_NET_ERR;
805 int in_use = 0;
806 int first_multi = 0;
807 uint8_t uni_overflow = 0;
808 uint8_t multi_overflow = 0;
809 uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
811 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
812 sizeof(mac_data.entries));
813 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
814 if (s != sizeof(mac_data.entries)) {
815 goto error;
817 iov_discard_front(&iov, &iov_cnt, s);
819 if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
820 goto error;
823 if (mac_data.entries <= MAC_TABLE_ENTRIES) {
824 s = iov_to_buf(iov, iov_cnt, 0, macs,
825 mac_data.entries * ETH_ALEN);
826 if (s != mac_data.entries * ETH_ALEN) {
827 goto error;
829 in_use += mac_data.entries;
830 } else {
831 uni_overflow = 1;
834 iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);
836 first_multi = in_use;
838 s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
839 sizeof(mac_data.entries));
840 mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
841 if (s != sizeof(mac_data.entries)) {
842 goto error;
845 iov_discard_front(&iov, &iov_cnt, s);
847 if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
848 goto error;
851 if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
852 s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
853 mac_data.entries * ETH_ALEN);
854 if (s != mac_data.entries * ETH_ALEN) {
855 goto error;
857 in_use += mac_data.entries;
858 } else {
859 multi_overflow = 1;
862 n->mac_table.in_use = in_use;
863 n->mac_table.first_multi = first_multi;
864 n->mac_table.uni_overflow = uni_overflow;
865 n->mac_table.multi_overflow = multi_overflow;
866 memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
867 g_free(macs);
868 rxfilter_notify(nc);
870 return VIRTIO_NET_OK;
872 error:
873 g_free(macs);
874 return VIRTIO_NET_ERR;
877 static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
878 struct iovec *iov, unsigned int iov_cnt)
880 VirtIODevice *vdev = VIRTIO_DEVICE(n);
881 uint16_t vid;
882 size_t s;
883 NetClientState *nc = qemu_get_queue(n->nic);
885 s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
886 vid = virtio_lduw_p(vdev, &vid);
887 if (s != sizeof(vid)) {
888 return VIRTIO_NET_ERR;
891 if (vid >= MAX_VLAN)
892 return VIRTIO_NET_ERR;
894 if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
895 n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
896 else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
897 n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
898 else
899 return VIRTIO_NET_ERR;
901 rxfilter_notify(nc);
903 return VIRTIO_NET_OK;
906 static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
907 struct iovec *iov, unsigned int iov_cnt)
909 if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
910 n->status & VIRTIO_NET_S_ANNOUNCE) {
911 n->status &= ~VIRTIO_NET_S_ANNOUNCE;
912 if (n->announce_counter) {
913 timer_mod(n->announce_timer,
914 qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
915 self_announce_delay(n->announce_counter));
917 return VIRTIO_NET_OK;
918 } else {
919 return VIRTIO_NET_ERR;
923 static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
924 struct iovec *iov, unsigned int iov_cnt)
926 VirtIODevice *vdev = VIRTIO_DEVICE(n);
927 struct virtio_net_ctrl_mq mq;
928 size_t s;
929 uint16_t queues;
931 s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
932 if (s != sizeof(mq)) {
933 return VIRTIO_NET_ERR;
936 if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
937 return VIRTIO_NET_ERR;
940 queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);
942 if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
943 queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
944 queues > n->max_queues ||
945 !n->multiqueue) {
946 return VIRTIO_NET_ERR;
949 n->curr_queues = queues;
950 /* stop the backend before changing the number of queues to avoid handling a
951 * disabled queue */
952 virtio_net_set_status(vdev, vdev->status);
953 virtio_net_set_queues(n);
955 return VIRTIO_NET_OK;
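/* Pop requests from the control virtqueue, dispatch them by class (RX mode,
 * MAC, VLAN, announce, MQ, guest offloads) and return a one-byte status. */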
958 static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
960 VirtIONet *n = VIRTIO_NET(vdev);
961 struct virtio_net_ctrl_hdr ctrl;
962 virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
963 VirtQueueElement *elem;
964 size_t s;
965 struct iovec *iov, *iov2;
966 unsigned int iov_cnt;
968 for (;;) {
969 elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
970 if (!elem) {
971 break;
973 if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
974 iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
975 virtio_error(vdev, "virtio-net ctrl missing headers");
976 virtqueue_detach_element(vq, elem, 0);
977 g_free(elem);
978 break;
981 iov_cnt = elem->out_num;
982 iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
983 s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
984 iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
985 if (s != sizeof(ctrl)) {
986 status = VIRTIO_NET_ERR;
987 } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
988 status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
989 } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
990 status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
991 } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
992 status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
993 } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
994 status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
995 } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
996 status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
997 } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
998 status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
1001 s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
1002 assert(s == sizeof(status));
1004 virtqueue_push(vq, elem, sizeof(status));
1005 virtio_notify(vdev, vq);
1006 g_free(iov2);
1007 g_free(elem);
1011 /* RX */
1013 static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
1015 VirtIONet *n = VIRTIO_NET(vdev);
1016 int queue_index = vq2q(virtio_get_queue_index(vq));
1018 qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
1021 static int virtio_net_can_receive(NetClientState *nc)
1023 VirtIONet *n = qemu_get_nic_opaque(nc);
1024 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1025 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1027 if (!vdev->vm_running) {
1028 return 0;
1031 if (nc->queue_index >= n->curr_queues) {
1032 return 0;
1035 if (!virtio_queue_ready(q->rx_vq) ||
1036 !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1037 return 0;
1040 return 1;
1043 static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
1045 VirtIONet *n = q->n;
1046 if (virtio_queue_empty(q->rx_vq) ||
1047 (n->mergeable_rx_bufs &&
1048 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1049 virtio_queue_set_notification(q->rx_vq, 1);
1051 /* To avoid a race condition where the guest has made some buffers
1052 * available after the above check but before notification was
1053 * enabled, check for available buffers again.
1055 if (virtio_queue_empty(q->rx_vq) ||
1056 (n->mergeable_rx_bufs &&
1057 !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
1058 return 0;
1062 virtio_queue_set_notification(q->rx_vq, 0);
1063 return 1;
1066 static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
1068 virtio_tswap16s(vdev, &hdr->hdr_len);
1069 virtio_tswap16s(vdev, &hdr->gso_size);
1070 virtio_tswap16s(vdev, &hdr->csum_start);
1071 virtio_tswap16s(vdev, &hdr->csum_offset);
1074 /* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
1075 * it never finds out that the packets don't have valid checksums. This
1076 * causes dhclient to get upset. Fedora's carried a patch for ages to
1077 * fix this with Xen but it hasn't appeared in an upstream release of
1078 * dhclient yet.
1080 * To avoid breaking existing guests, we catch udp packets and add
1081 * checksums. This is terrible but it's better than hacking the guest
1082 * kernels.
1084 * N.B. if we introduce a zero-copy API, this operation is no longer free so
1085 * we should provide a mechanism to disable it to avoid polluting the host
1086 * cache.
1088 static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
1089 uint8_t *buf, size_t size)
1091 if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
1092 (size > 27 && size < 1500) && /* normal sized MTU */
1093 (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
1094 (buf[23] == 17) && /* ip.protocol == UDP */
1095 (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
1096 net_checksum_calculate(buf, size);
1097 hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
1101 static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
1102 const void *buf, size_t size)
1104 if (n->has_vnet_hdr) {
1105 /* FIXME this cast is evil */
1106 void *wbuf = (void *)buf;
1107 work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
1108 size - n->host_hdr_len);
1110 if (n->needs_vnet_hdr_swap) {
1111 virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
1113 iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
1114 } else {
1115 struct virtio_net_hdr hdr = {
1116 .flags = 0,
1117 .gso_type = VIRTIO_NET_HDR_GSO_NONE
1119 iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
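/* Decide whether a received packet should be delivered, applying promiscuous
 * mode, the VLAN table and the unicast/multicast MAC filters.
 * Returns nonzero to accept the packet, 0 to drop it. */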
1123 static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
1125 static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1126 static const uint8_t vlan[] = {0x81, 0x00};
1127 uint8_t *ptr = (uint8_t *)buf;
1128 int i;
1130 if (n->promisc)
1131 return 1;
1133 ptr += n->host_hdr_len;
1135 if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
1136 int vid = lduw_be_p(ptr + 14) & 0xfff;
1137 if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
1138 return 0;
1141 if (ptr[0] & 1) { // multicast
1142 if (!memcmp(ptr, bcast, sizeof(bcast))) {
1143 return !n->nobcast;
1144 } else if (n->nomulti) {
1145 return 0;
1146 } else if (n->allmulti || n->mac_table.multi_overflow) {
1147 return 1;
1150 for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
1151 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1152 return 1;
1155 } else { // unicast
1156 if (n->nouni) {
1157 return 0;
1158 } else if (n->alluni || n->mac_table.uni_overflow) {
1159 return 1;
1160 } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
1161 return 1;
1164 for (i = 0; i < n->mac_table.first_multi; i++) {
1165 if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
1166 return 1;
1171 return 0;
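/* Copy one packet into guest RX buffers, prepending the virtio-net header.
 * With mergeable buffers a packet may span several descriptors; num_buffers
 * is patched in at the end.  Returns 0 (packet stays queued) when the guest
 * has no buffers available. */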
1174 static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
1175 size_t size)
1177 VirtIONet *n = qemu_get_nic_opaque(nc);
1178 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1179 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1180 struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
1181 struct virtio_net_hdr_mrg_rxbuf mhdr;
1182 unsigned mhdr_cnt = 0;
1183 size_t offset, i, guest_offset;
1185 if (!virtio_net_can_receive(nc)) {
1186 return -1;
1189 /* hdr_len refers to the header we supply to the guest */
1190 if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
1191 return 0;
1194 if (!receive_filter(n, buf, size))
1195 return size;
1197 offset = i = 0;
1199 while (offset < size) {
1200 VirtQueueElement *elem;
1201 int len, total;
1202 const struct iovec *sg;
1204 total = 0;
1206 elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
1207 if (!elem) {
1208 if (i) {
1209 virtio_error(vdev, "virtio-net unexpected empty queue: "
1210 "i %zd mergeable %d offset %zd, size %zd, "
1211 "guest hdr len %zd, host hdr len %zd "
1212 "guest features 0x%" PRIx64,
1213 i, n->mergeable_rx_bufs, offset, size,
1214 n->guest_hdr_len, n->host_hdr_len,
1215 vdev->guest_features);
1217 return -1;
1220 if (elem->in_num < 1) {
1221 virtio_error(vdev,
1222 "virtio-net receive queue contains no in buffers");
1223 virtqueue_detach_element(q->rx_vq, elem, 0);
1224 g_free(elem);
1225 return -1;
1228 sg = elem->in_sg;
1229 if (i == 0) {
1230 assert(offset == 0);
1231 if (n->mergeable_rx_bufs) {
1232 mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
1233 sg, elem->in_num,
1234 offsetof(typeof(mhdr), num_buffers),
1235 sizeof(mhdr.num_buffers));
1238 receive_header(n, sg, elem->in_num, buf, size);
1239 offset = n->host_hdr_len;
1240 total += n->guest_hdr_len;
1241 guest_offset = n->guest_hdr_len;
1242 } else {
1243 guest_offset = 0;
1246 /* copy in packet. ugh */
1247 len = iov_from_buf(sg, elem->in_num, guest_offset,
1248 buf + offset, size - offset);
1249 total += len;
1250 offset += len;
1251 /* If buffers can't be merged, at this point we
1252 * must have consumed the complete packet.
1253 * Otherwise, drop it. */
1254 if (!n->mergeable_rx_bufs && offset < size) {
1255 virtqueue_unpop(q->rx_vq, elem, total);
1256 g_free(elem);
1257 return size;
1260 /* signal other side */
1261 virtqueue_fill(q->rx_vq, elem, total, i++);
1262 g_free(elem);
1265 if (mhdr_cnt) {
1266 virtio_stw_p(vdev, &mhdr.num_buffers, i);
1267 iov_from_buf(mhdr_sg, mhdr_cnt,
1269 &mhdr.num_buffers, sizeof mhdr.num_buffers);
1272 virtqueue_flush(q->rx_vq, i);
1273 virtio_notify(vdev, q->rx_vq);
1275 return size;
1278 static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
1279 size_t size)
1281 ssize_t r;
1283 rcu_read_lock();
1284 r = virtio_net_receive_rcu(nc, buf, size);
1285 rcu_read_unlock();
1286 return r;
1289 static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
1291 static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
1293 VirtIONet *n = qemu_get_nic_opaque(nc);
1294 VirtIONetQueue *q = virtio_net_get_subqueue(nc);
1295 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1297 virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
1298 virtio_notify(vdev, q->tx_vq);
1300 g_free(q->async_tx.elem);
1301 q->async_tx.elem = NULL;
1303 virtio_queue_set_notification(q->tx_vq, 1);
1304 virtio_net_flush_tx(q);
1307 /* TX */
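/* Send up to tx_burst packets from the TX virtqueue, byte-swapping or
 * shortening the vnet header as required by the peer.  Returns -EBUSY if the
 * backend completes a send asynchronously, -EINVAL on a malformed request. */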
1308 static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
1310 VirtIONet *n = q->n;
1311 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1312 VirtQueueElement *elem;
1313 int32_t num_packets = 0;
1314 int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
1315 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1316 return num_packets;
1319 if (q->async_tx.elem) {
1320 virtio_queue_set_notification(q->tx_vq, 0);
1321 return num_packets;
1324 for (;;) {
1325 ssize_t ret;
1326 unsigned int out_num;
1327 struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
1328 struct virtio_net_hdr_mrg_rxbuf mhdr;
1330 elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
1331 if (!elem) {
1332 break;
1335 out_num = elem->out_num;
1336 out_sg = elem->out_sg;
1337 if (out_num < 1) {
1338 virtio_error(vdev, "virtio-net header not in first element");
1339 virtqueue_detach_element(q->tx_vq, elem, 0);
1340 g_free(elem);
1341 return -EINVAL;
1344 if (n->has_vnet_hdr) {
1345 if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
1346 n->guest_hdr_len) {
1347 virtio_error(vdev, "virtio-net header incorrect");
1348 virtqueue_detach_element(q->tx_vq, elem, 0);
1349 g_free(elem);
1350 return -EINVAL;
1352 if (n->needs_vnet_hdr_swap) {
1353 virtio_net_hdr_swap(vdev, (void *) &mhdr);
1354 sg2[0].iov_base = &mhdr;
1355 sg2[0].iov_len = n->guest_hdr_len;
1356 out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
1357 out_sg, out_num,
1358 n->guest_hdr_len, -1);
1359 if (out_num == VIRTQUEUE_MAX_SIZE) {
1360 goto drop;
1362 out_num += 1;
1363 out_sg = sg2;
1367 * If host wants to see the guest header as is, we can
1368 * pass it on unchanged. Otherwise, copy just the parts
1369 * that host is interested in.
1371 assert(n->host_hdr_len <= n->guest_hdr_len);
1372 if (n->host_hdr_len != n->guest_hdr_len) {
1373 unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
1374 out_sg, out_num,
1375 0, n->host_hdr_len);
1376 sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
1377 out_sg, out_num,
1378 n->guest_hdr_len, -1);
1379 out_num = sg_num;
1380 out_sg = sg;
1383 ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
1384 out_sg, out_num, virtio_net_tx_complete);
1385 if (ret == 0) {
1386 virtio_queue_set_notification(q->tx_vq, 0);
1387 q->async_tx.elem = elem;
1388 return -EBUSY;
1391 drop:
1392 virtqueue_push(q->tx_vq, elem, 0);
1393 virtio_notify(vdev, q->tx_vq);
1394 g_free(elem);
1396 if (++num_packets >= n->tx_burst) {
1397 break;
1400 return num_packets;
1403 static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
1405 VirtIONet *n = VIRTIO_NET(vdev);
1406 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
1408 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
1409 virtio_net_drop_tx_queue_data(vdev, vq);
1410 return;
1413 /* This happens when device was stopped but VCPU wasn't. */
1414 if (!vdev->vm_running) {
1415 q->tx_waiting = 1;
1416 return;
1419 if (q->tx_waiting) {
1420 virtio_queue_set_notification(vq, 1);
1421 timer_del(q->tx_timer);
1422 q->tx_waiting = 0;
1423 if (virtio_net_flush_tx(q) == -EINVAL) {
1424 return;
1426 } else {
1427 timer_mod(q->tx_timer,
1428 qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
1429 q->tx_waiting = 1;
1430 virtio_queue_set_notification(vq, 0);
1434 static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
1436 VirtIONet *n = VIRTIO_NET(vdev);
1437 VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];
1439 if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
1440 virtio_net_drop_tx_queue_data(vdev, vq);
1441 return;
1444 if (unlikely(q->tx_waiting)) {
1445 return;
1447 q->tx_waiting = 1;
1448 /* This happens when device was stopped but VCPU wasn't. */
1449 if (!vdev->vm_running) {
1450 return;
1452 virtio_queue_set_notification(vq, 0);
1453 qemu_bh_schedule(q->tx_bh);
1456 static void virtio_net_tx_timer(void *opaque)
1458 VirtIONetQueue *q = opaque;
1459 VirtIONet *n = q->n;
1460 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1461 /* This happens when device was stopped but BH wasn't. */
1462 if (!vdev->vm_running) {
1463 /* Make sure tx waiting is set, so we'll run when restarted. */
1464 assert(q->tx_waiting);
1465 return;
1468 q->tx_waiting = 0;
1470 /* Just in case the driver is not ready any more */
1471 if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
1472 return;
1475 virtio_queue_set_notification(q->tx_vq, 1);
1476 virtio_net_flush_tx(q);
1479 static void virtio_net_tx_bh(void *opaque)
1481 VirtIONetQueue *q = opaque;
1482 VirtIONet *n = q->n;
1483 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1484 int32_t ret;
1486 /* This happens when device was stopped but BH wasn't. */
1487 if (!vdev->vm_running) {
1488 /* Make sure tx waiting is set, so we'll run when restarted. */
1489 assert(q->tx_waiting);
1490 return;
1493 q->tx_waiting = 0;
1495 /* Just in case the driver is not ready any more */
1496 if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
1497 return;
1500 ret = virtio_net_flush_tx(q);
1501 if (ret == -EBUSY || ret == -EINVAL) {
1502 return; /* Notification re-enable handled by tx_complete or device
1503 * broken */
1506 /* If we flush a full burst of packets, assume there are
1507 * more coming and immediately reschedule */
1508 if (ret >= n->tx_burst) {
1509 qemu_bh_schedule(q->tx_bh);
1510 q->tx_waiting = 1;
1511 return;
1514 /* If less than a full burst, re-enable notification and flush
1515 * anything that may have come in while we weren't looking. If
1516 * we find something, assume the guest is still active and reschedule */
1517 virtio_queue_set_notification(q->tx_vq, 1);
1518 ret = virtio_net_flush_tx(q);
1519 if (ret == -EINVAL) {
1520 return;
1521 } else if (ret > 0) {
1522 virtio_queue_set_notification(q->tx_vq, 0);
1523 qemu_bh_schedule(q->tx_bh);
1524 q->tx_waiting = 1;
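/* Create the RX/TX virtqueue pair for queue 'index'; TX is processed either
 * via a timer or a bottom half, depending on the "tx" property. */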
1528 static void virtio_net_add_queue(VirtIONet *n, int index)
1530 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1532 n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
1533 virtio_net_handle_rx);
1535 if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
1536 n->vqs[index].tx_vq =
1537 virtio_add_queue(vdev, n->net_conf.tx_queue_size,
1538 virtio_net_handle_tx_timer);
1539 n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
1540 virtio_net_tx_timer,
1541 &n->vqs[index]);
1542 } else {
1543 n->vqs[index].tx_vq =
1544 virtio_add_queue(vdev, n->net_conf.tx_queue_size,
1545 virtio_net_handle_tx_bh);
1546 n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
1549 n->vqs[index].tx_waiting = 0;
1550 n->vqs[index].n = n;
1553 static void virtio_net_del_queue(VirtIONet *n, int index)
1555 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1556 VirtIONetQueue *q = &n->vqs[index];
1557 NetClientState *nc = qemu_get_subqueue(n->nic, index);
1559 qemu_purge_queued_packets(nc);
1561 virtio_del_queue(vdev, index * 2);
1562 if (q->tx_timer) {
1563 timer_del(q->tx_timer);
1564 timer_free(q->tx_timer);
1565 q->tx_timer = NULL;
1566 } else {
1567 qemu_bh_delete(q->tx_bh);
1568 q->tx_bh = NULL;
1570 q->tx_waiting = 0;
1571 virtio_del_queue(vdev, index * 2 + 1);
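/* Virtqueues are laid out as RX/TX pairs followed by the control queue, so
 * changing the number of pairs means deleting the control queue first and
 * re-adding it after the pairs have been adjusted. */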
1574 static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
1576 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1577 int old_num_queues = virtio_get_num_queues(vdev);
1578 int new_num_queues = new_max_queues * 2 + 1;
1579 int i;
1581 assert(old_num_queues >= 3);
1582 assert(old_num_queues % 2 == 1);
1584 if (old_num_queues == new_num_queues) {
1585 return;
1589 * We always need to remove and add ctrl vq if
1590 * old_num_queues != new_num_queues. Remove ctrl_vq first,
1591 * and then we only enter one of the following two loops.
1593 virtio_del_queue(vdev, old_num_queues - 1);
1595 for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
1596 /* new_num_queues < old_num_queues */
1597 virtio_net_del_queue(n, i / 2);
1600 for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
1601 /* new_num_queues > old_num_queues */
1602 virtio_net_add_queue(n, i / 2);
1605 /* add ctrl_vq last */
1606 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
1609 static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
1611 int max = multiqueue ? n->max_queues : 1;
1613 n->multiqueue = multiqueue;
1614 virtio_net_change_num_queues(n, max);
1616 virtio_net_set_queues(n);
1619 static int virtio_net_post_load_device(void *opaque, int version_id)
1621 VirtIONet *n = opaque;
1622 VirtIODevice *vdev = VIRTIO_DEVICE(n);
1623 int i, link_down;
1625 virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
1626 virtio_vdev_has_feature(vdev,
1627 VIRTIO_F_VERSION_1));
1629 /* MAC_TABLE_ENTRIES may be different from the saved image */
1630 if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
1631 n->mac_table.in_use = 0;
1634 if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
1635 n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
1638 if (peer_has_vnet_hdr(n)) {
1639 virtio_net_apply_guest_offloads(n);
1642 virtio_net_set_queues(n);
1644 /* Find the first multicast entry in the saved MAC filter */
1645 for (i = 0; i < n->mac_table.in_use; i++) {
1646 if (n->mac_table.macs[i * ETH_ALEN] & 1) {
1647 break;
1650 n->mac_table.first_multi = i;
1652 /* nc.link_down can't be migrated, so infer link_down according
1653 * to link status bit in n->status */
1654 link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
1655 for (i = 0; i < n->max_queues; i++) {
1656 qemu_get_subqueue(n->nic, i)->link_down = link_down;
1659 if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
1660 virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
1661 n->announce_counter = SELF_ANNOUNCE_ROUNDS;
1662 timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
1665 return 0;
1668 /* tx_waiting field of a VirtIONetQueue */
1669 static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
1670 .name = "virtio-net-queue-tx_waiting",
1671 .fields = (VMStateField[]) {
1672 VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
1673 VMSTATE_END_OF_LIST()
1677 static bool max_queues_gt_1(void *opaque, int version_id)
1679 return VIRTIO_NET(opaque)->max_queues > 1;
1682 static bool has_ctrl_guest_offloads(void *opaque, int version_id)
1684 return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
1685 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
1688 static bool mac_table_fits(void *opaque, int version_id)
1690 return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
1693 static bool mac_table_doesnt_fit(void *opaque, int version_id)
1695 return !mac_table_fits(opaque, version_id);
1698 /* This temporary type is shared by all the WITH_TMP methods
1699 * although only some fields are used by each.
1701 struct VirtIONetMigTmp {
1702 VirtIONet *parent;
1703 VirtIONetQueue *vqs_1;
1704 uint16_t curr_queues_1;
1705 uint8_t has_ufo;
1706 uint32_t has_vnet_hdr;
1709 /* The 2nd and subsequent tx_waiting flags are loaded later than
1710 * the 1st entry in the queues and only if there's more than one
1711 * entry. We use the tmp mechanism to calculate a temporary
1712 * pointer and count and also validate the count.
1715 static void virtio_net_tx_waiting_pre_save(void *opaque)
1717 struct VirtIONetMigTmp *tmp = opaque;
1719 tmp->vqs_1 = tmp->parent->vqs + 1;
1720 tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
1721 if (tmp->parent->curr_queues == 0) {
1722 tmp->curr_queues_1 = 0;
1726 static int virtio_net_tx_waiting_pre_load(void *opaque)
1728 struct VirtIONetMigTmp *tmp = opaque;
1730 /* Reuse the pointer setup from save */
1731 virtio_net_tx_waiting_pre_save(opaque);
1733 if (tmp->parent->curr_queues > tmp->parent->max_queues) {
1734 error_report("virtio-net: curr_queues %x > max_queues %x",
1735 tmp->parent->curr_queues, tmp->parent->max_queues);
1737 return -EINVAL;
1740 return 0; /* all good */
1743 static const VMStateDescription vmstate_virtio_net_tx_waiting = {
1744 .name = "virtio-net-tx_waiting",
1745 .pre_load = virtio_net_tx_waiting_pre_load,
1746 .pre_save = virtio_net_tx_waiting_pre_save,
1747 .fields = (VMStateField[]) {
1748 VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
1749 curr_queues_1,
1750 vmstate_virtio_net_queue_tx_waiting,
1751 struct VirtIONetQueue),
1752 VMSTATE_END_OF_LIST()
1756 /* the 'has_ufo' flag is just tested; if the incoming stream has the
1757 * flag set we need to check that we have it
1759 static int virtio_net_ufo_post_load(void *opaque, int version_id)
1761 struct VirtIONetMigTmp *tmp = opaque;
1763 if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
1764 error_report("virtio-net: saved image requires TUN_F_UFO support");
1765 return -EINVAL;
1768 return 0;
1771 static void virtio_net_ufo_pre_save(void *opaque)
1773 struct VirtIONetMigTmp *tmp = opaque;
1775 tmp->has_ufo = tmp->parent->has_ufo;
1778 static const VMStateDescription vmstate_virtio_net_has_ufo = {
1779 .name = "virtio-net-ufo",
1780 .post_load = virtio_net_ufo_post_load,
1781 .pre_save = virtio_net_ufo_pre_save,
1782 .fields = (VMStateField[]) {
1783 VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
1784 VMSTATE_END_OF_LIST()
1788 /* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
1789 * flag set we need to check that we have it
1791 static int virtio_net_vnet_post_load(void *opaque, int version_id)
1793 struct VirtIONetMigTmp *tmp = opaque;
1795 if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
1796 error_report("virtio-net: saved image requires vnet_hdr=on");
1797 return -EINVAL;
1800 return 0;
1803 static void virtio_net_vnet_pre_save(void *opaque)
1805 struct VirtIONetMigTmp *tmp = opaque;
1807 tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
1810 static const VMStateDescription vmstate_virtio_net_has_vnet = {
1811 .name = "virtio-net-vnet",
1812 .post_load = virtio_net_vnet_post_load,
1813 .pre_save = virtio_net_vnet_pre_save,
1814 .fields = (VMStateField[]) {
1815 VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
1816 VMSTATE_END_OF_LIST()
1820 static const VMStateDescription vmstate_virtio_net_device = {
1821 .name = "virtio-net-device",
1822 .version_id = VIRTIO_NET_VM_VERSION,
1823 .minimum_version_id = VIRTIO_NET_VM_VERSION,
1824 .post_load = virtio_net_post_load_device,
1825 .fields = (VMStateField[]) {
1826 VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
1827 VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
1828 vmstate_virtio_net_queue_tx_waiting,
1829 VirtIONetQueue),
1830 VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
1831 VMSTATE_UINT16(status, VirtIONet),
1832 VMSTATE_UINT8(promisc, VirtIONet),
1833 VMSTATE_UINT8(allmulti, VirtIONet),
1834 VMSTATE_UINT32(mac_table.in_use, VirtIONet),
1836 /* Guarded pair: If it fits we load it, else we throw it away
1837 * - can happen if source has a larger MAC table; post-load
1838 * sets flags in this case.
1840 VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
1841 0, mac_table_fits, mac_table.in_use,
1842 ETH_ALEN),
1843 VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
1844 mac_table.in_use, ETH_ALEN),
1846 /* Note: This is an array of uint32's that's always been saved as a
1847 * buffer; hold onto your endiannesses; it's actually used as a bitmap
1848 * but based on the uint.
1850 VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
1851 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
1852 vmstate_virtio_net_has_vnet),
1853 VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
1854 VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
1855 VMSTATE_UINT8(alluni, VirtIONet),
1856 VMSTATE_UINT8(nomulti, VirtIONet),
1857 VMSTATE_UINT8(nouni, VirtIONet),
1858 VMSTATE_UINT8(nobcast, VirtIONet),
1859 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
1860 vmstate_virtio_net_has_ufo),
1861 VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
1862 vmstate_info_uint16_equal, uint16_t),
1863 VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
1864 VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
1865 vmstate_virtio_net_tx_waiting),
1866 VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
1867 has_ctrl_guest_offloads),
1868 VMSTATE_END_OF_LIST()
1872 static NetClientInfo net_virtio_info = {
1873 .type = NET_CLIENT_DRIVER_NIC,
1874 .size = sizeof(NICState),
1875 .can_receive = virtio_net_can_receive,
1876 .receive = virtio_net_receive,
1877 .link_status_changed = virtio_net_set_link_status,
1878 .query_rx_filter = virtio_net_query_rxfilter,
1881 static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
1883 VirtIONet *n = VIRTIO_NET(vdev);
1884 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
1885 assert(n->vhost_started);
1886 return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
1889 static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
1890 bool mask)
1892 VirtIONet *n = VIRTIO_NET(vdev);
1893 NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
1894 assert(n->vhost_started);
1895 vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
1896 vdev, idx, mask);
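/* Size the config space to cover the largest field enabled by the host
 * feature bits (see feature_sizes). */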
1899 static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
1901 int i, config_size = 0;
1902 virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
1904 for (i = 0; feature_sizes[i].flags != 0; i++) {
1905 if (host_features & feature_sizes[i].flags) {
1906 config_size = MAX(feature_sizes[i].end, config_size);
1909 n->config_size = config_size;
1912 void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
1913 const char *type)
1916 * The name can be NULL; in that case the netclient name will be type.x.
1918 assert(type != NULL);
1920 g_free(n->netclient_name);
1921 g_free(n->netclient_type);
1922 n->netclient_name = g_strdup(name);
1923 n->netclient_type = g_strdup(type);
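/* realize: validate the queue-size and queue-count properties, create the
 * virtqueues and the NIC backend, and initialise the MAC/VLAN filter state. */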
1926 static void virtio_net_device_realize(DeviceState *dev, Error **errp)
1928 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
1929 VirtIONet *n = VIRTIO_NET(dev);
1930 NetClientState *nc;
1931 int i;
1933 if (n->net_conf.mtu) {
1934 n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
1937 virtio_net_set_config_size(n, n->host_features);
1938 virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);
1941 * We set a lower limit on RX queue size to what it always was.
1942 * Guests that want a smaller ring can always resize it without
1943 * help from us (using virtio 1 and up).
1945 if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
1946 n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
1947 !is_power_of_2(n->net_conf.rx_queue_size)) {
1948 error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
1949 "must be a power of 2 between %d and %d.",
1950 n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
1951 VIRTQUEUE_MAX_SIZE);
1952 virtio_cleanup(vdev);
1953 return;
1956 if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
1957 n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
1958 !is_power_of_2(n->net_conf.tx_queue_size)) {
1959 error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
1960 "must be a power of 2 between %d and %d",
1961 n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
1962 VIRTQUEUE_MAX_SIZE);
1963 virtio_cleanup(vdev);
1964 return;
1967 n->max_queues = MAX(n->nic_conf.peers.queues, 1);
1968 if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
1969 error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
1970 "must be a positive integer less than %d.",
1971 n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
1972 virtio_cleanup(vdev);
1973 return;
1975 n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
1976 n->curr_queues = 1;
1977 n->tx_timeout = n->net_conf.txtimer;
1979 if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
1980 && strcmp(n->net_conf.tx, "bh")) {
1981 error_report("virtio-net: "
1982 "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
1983 n->net_conf.tx);
1984 error_report("Defaulting to \"bh\"");
1987 n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
1988 n->net_conf.tx_queue_size);
1990 for (i = 0; i < n->max_queues; i++) {
1991 virtio_net_add_queue(n, i);
1994 n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
1995 qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
1996 memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
1997 n->status = VIRTIO_NET_S_LINK_UP;
1998 n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
1999 virtio_net_announce_timer, n);
2001 if (n->netclient_type) {
2003 * Happens when virtio_net_set_netclient_name has been called.
2005 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
2006 n->netclient_type, n->netclient_name, n);
2007 } else {
2008 n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
2009 object_get_typename(OBJECT(dev)), dev->id, n);
2012 peer_test_vnet_hdr(n);
2013 if (peer_has_vnet_hdr(n)) {
2014 for (i = 0; i < n->max_queues; i++) {
2015 qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
2017 n->host_hdr_len = sizeof(struct virtio_net_hdr);
2018 } else {
2019 n->host_hdr_len = 0;
2022 qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);
2024 n->vqs[0].tx_waiting = 0;
2025 n->tx_burst = n->net_conf.txburst;
2026 virtio_net_set_mrg_rx_bufs(n, 0, 0);
2027 n->promisc = 1; /* for compatibility */
2029 n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);
2031 n->vlans = g_malloc0(MAX_VLAN >> 3);
2033 nc = qemu_get_queue(n->nic);
2034 nc->rxfilter_notify_enabled = 1;
2036 n->qdev = dev;
2039 static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
2041 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
2042 VirtIONet *n = VIRTIO_NET(dev);
2043 int i, max_queues;
2045 /* This will stop vhost backend if appropriate. */
2046 virtio_net_set_status(vdev, 0);
2048 g_free(n->netclient_name);
2049 n->netclient_name = NULL;
2050 g_free(n->netclient_type);
2051 n->netclient_type = NULL;
2053 g_free(n->mac_table.macs);
2054 g_free(n->vlans);
2056 max_queues = n->multiqueue ? n->max_queues : 1;
2057 for (i = 0; i < max_queues; i++) {
2058 virtio_net_del_queue(n, i);
2061 timer_del(n->announce_timer);
2062 timer_free(n->announce_timer);
2063 g_free(n->vqs);
2064 qemu_del_nic(n->nic);
2065 virtio_cleanup(vdev);
2068 static void virtio_net_instance_init(Object *obj)
2070 VirtIONet *n = VIRTIO_NET(obj);
2073 * The default config_size is sizeof(struct virtio_net_config).
2074 * Can be overridden with virtio_net_set_config_size.
2076 n->config_size = sizeof(struct virtio_net_config);
2077 device_add_bootindex_property(obj, &n->nic_conf.bootindex,
2078 "bootindex", "/ethernet-phy@0",
2079 DEVICE(n), NULL);
2082 static void virtio_net_pre_save(void *opaque)
2084 VirtIONet *n = opaque;
2086 /* At this point, backend must be stopped, otherwise
2087 * it might keep writing to memory. */
2088 assert(!n->vhost_started);
2091 static const VMStateDescription vmstate_virtio_net = {
2092 .name = "virtio-net",
2093 .minimum_version_id = VIRTIO_NET_VM_VERSION,
2094 .version_id = VIRTIO_NET_VM_VERSION,
2095 .fields = (VMStateField[]) {
2096 VMSTATE_VIRTIO_DEVICE,
2097 VMSTATE_END_OF_LIST()
2099 .pre_save = virtio_net_pre_save,
2102 static Property virtio_net_properties[] = {
2103 DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
2104 DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
2105 VIRTIO_NET_F_GUEST_CSUM, true),
2106 DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
2107 DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features,
2108 VIRTIO_NET_F_GUEST_TSO4, true),
2109 DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features,
2110 VIRTIO_NET_F_GUEST_TSO6, true),
2111 DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features,
2112 VIRTIO_NET_F_GUEST_ECN, true),
2113 DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features,
2114 VIRTIO_NET_F_GUEST_UFO, true),
2115 DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features,
2116 VIRTIO_NET_F_GUEST_ANNOUNCE, true),
2117 DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features,
2118 VIRTIO_NET_F_HOST_TSO4, true),
2119 DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features,
2120 VIRTIO_NET_F_HOST_TSO6, true),
2121 DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features,
2122 VIRTIO_NET_F_HOST_ECN, true),
2123 DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features,
2124 VIRTIO_NET_F_HOST_UFO, true),
2125 DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features,
2126 VIRTIO_NET_F_MRG_RXBUF, true),
2127 DEFINE_PROP_BIT("status", VirtIONet, host_features,
2128 VIRTIO_NET_F_STATUS, true),
2129 DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features,
2130 VIRTIO_NET_F_CTRL_VQ, true),
2131 DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features,
2132 VIRTIO_NET_F_CTRL_RX, true),
2133 DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features,
2134 VIRTIO_NET_F_CTRL_VLAN, true),
2135 DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features,
2136 VIRTIO_NET_F_CTRL_RX_EXTRA, true),
2137 DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features,
2138 VIRTIO_NET_F_CTRL_MAC_ADDR, true),
2139 DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features,
2140 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
2141 DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
2142 DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
2143 DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
2144 TX_TIMER_INTERVAL),
2145 DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
2146 DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
2147 DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
2148 VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
2149 DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
2150 VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
2151 DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
2152 DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
2153 true),
2154 DEFINE_PROP_END_OF_LIST(),
2157 static void virtio_net_class_init(ObjectClass *klass, void *data)
2159 DeviceClass *dc = DEVICE_CLASS(klass);
2160 VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
2162 dc->props = virtio_net_properties;
2163 dc->vmsd = &vmstate_virtio_net;
2164 set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
2165 vdc->realize = virtio_net_device_realize;
2166 vdc->unrealize = virtio_net_device_unrealize;
2167 vdc->get_config = virtio_net_get_config;
2168 vdc->set_config = virtio_net_set_config;
2169 vdc->get_features = virtio_net_get_features;
2170 vdc->set_features = virtio_net_set_features;
2171 vdc->bad_features = virtio_net_bad_features;
2172 vdc->reset = virtio_net_reset;
2173 vdc->set_status = virtio_net_set_status;
2174 vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
2175 vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
2176 vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
2177 vdc->vmsd = &vmstate_virtio_net_device;
2180 static const TypeInfo virtio_net_info = {
2181 .name = TYPE_VIRTIO_NET,
2182 .parent = TYPE_VIRTIO_DEVICE,
2183 .instance_size = sizeof(VirtIONet),
2184 .instance_init = virtio_net_instance_init,
2185 .class_init = virtio_net_class_init,
2188 static void virtio_register_types(void)
2190 type_register_static(&virtio_net_info);
2193 type_init(virtio_register_types)