/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "qapi-event.h"
#include "hw/virtio/virtio-access.h"
#include "migration/misc.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/* previously fixed value */
#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256

/* for now, only allow larger queues; with virtio-1, guest can downsize */
#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE
#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))
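
/*
 * Worked example (for illustration): struct virtio_net_config begins with
 * "uint8_t mac[6]" followed by "uint16_t status", so
 * endof(struct virtio_net_config, status) == 6 + 2 == 8, i.e. offering
 * VIRTIO_NET_F_STATUS requires at least 8 bytes of config space.
 */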

typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {.flags = 1 << VIRTIO_NET_F_MTU,
     .end = endof(struct virtio_net_config, mtu)},
    {}
};
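
/*
 * Note (for illustration): virtio_net_set_config_size() below scans this
 * table and keeps the largest 'end' among the offered features, so the
 * table must stay terminated by the zeroed {} entry that stops the scan.
 */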

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

static int vq2q(int queue_index)
{
    return queue_index / 2;
}
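
/*
 * For illustration: virtqueues come in rx/tx pairs laid out as
 * rx0, tx0, rx1, tx1, ..., so virtqueue indices 0 and 1 both map to queue
 * pair 0, indices 2 and 3 to pair 1, and so on (vq2q(3) == 1).  The
 * control vq is added last and belongs to no pair.
 */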

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
    virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}

static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (n->needs_vnet_hdr_swap) {
            error_report("backend does not support %s vnet headers; "
                         "falling back on userspace virtio",
                         virtio_is_big_endian(vdev) ? "BE" : "LE");
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0; i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
            r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
            if (r < 0) {
                error_report("%uBytes MTU not supported by the backend",
                             n->net_conf.mtu);
                return;
            }
        }

        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
                                          NetClientState *peer,
                                          bool enable)
{
    if (virtio_is_big_endian(vdev)) {
        return qemu_set_vnet_be(peer, enable);
    } else {
        return qemu_set_vnet_le(peer, enable);
    }
}

static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
                                       int queues, bool enable)
{
    int i;

    for (i = 0; i < queues; i++) {
        if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
            enable) {
            while (--i >= 0) {
                virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
            }

            return true;
        }
    }

    return false;
}

static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (virtio_net_started(n, status)) {
        /* Before using the device, we tell the network backend about the
         * endianness to use when parsing vnet headers. If the backend
         * can't do it, we fall back onto fixing the headers in the core
         * virtio-net code.
         */
        n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
                                                            queues, true);
    } else if (virtio_net_started(n, vdev->status)) {
        /* After using the device, we need to reset the network backend to
         * the default (guest native endianness), otherwise the guest may
         * lose network connectivity if it is rebooted into a different
         * endianness.
         */
        virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
    }
}

static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
{
    unsigned int dropped = virtqueue_drop_all(vq);
    if (dropped) {
        virtio_notify(vdev, vq);
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vnet_endian_status(n, status);
    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
        bool queue_started;
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }
        queue_started =
            virtio_net_started(n, queue_status) && !n->vhost_started;

        if (queue_started) {
            qemu_flush_queued_packets(ncs);
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (queue_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
            if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
                (queue_status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                /* if tx is waiting we likely have some packets in the tx
                 * queue and disabled notification */
                q->tx_waiting = 0;
                virtio_queue_set_notification(q->tx_vq, 1);
                virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path,
                                              &error_abort);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}
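
/*
 * Worked example (for illustration): n->vlans is a 4096-bit bitmap held
 * in 128 uint32_t words, so VLAN id 100 lives in word 100 >> 5 == 3 at
 * bit 100 & 0x1f == 4; the (i << 5) + j above is the inverse mapping
 * used when rebuilding the id list.
 */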

static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = qemu_mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
                                       int version_1)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    if (version_1) {
        n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
    } else {
        n->guest_hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);
    }

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}
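
/*
 * For illustration: struct virtio_net_hdr is 10 bytes, and
 * virtio_net_hdr_mrg_rxbuf appends a 16-bit num_buffers field, so
 * guest_hdr_len is 12 whenever VIRTIO_F_VERSION_1 or MRG_RXBUF is
 * negotiated and 10 only for legacy guests without mergeable buffers.
 */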

static int virtio_net_max_tx_queue_size(VirtIONet *n)
{
    NetClientState *peer = n->nic_conf.peers.ncs[0];

    /*
     * Backends other than vhost-user don't support max queue size.
     */
    if (!peer) {
        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }

    if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) {
        return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE;
    }

    return VIRTQUEUE_MAX_SIZE;
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 1);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    if (n->max_queues == 1) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 0);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    if (n->nic->peer_deleted) {
        return;
    }

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    /* Firstly sync all virtio-net possible supported features */
    features |= n->host_features;

    virtio_add_feature(&features, VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }

    features = vhost_net_get_features(get_vhost_net(nc->peer), features);
    vdev->backend_features = features;

    if (n->mtu_bypass_backend &&
            (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
        features |= (1ULL << VIRTIO_NET_F_MTU);
    }

    return features;
}

static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint64_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
    virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    if (n->mtu_bypass_backend &&
            !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
        features &= ~(1ULL << VIRTIO_NET_F_MTU);
    }

    virtio_net_set_multiqueue(n,
                              virtio_has_feature(features, VIRTIO_NET_F_MQ));

    virtio_net_set_mrg_rx_bufs(n,
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_MRG_RXBUF),
                               virtio_has_feature(features,
                                                  VIRTIO_F_VERSION_1));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        offloads = virtio_ldq_p(vdev, &offloads);

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}
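
/*
 * Resulting table layout (for illustration): macs[] holds the unicast
 * entries first, then the multicast entries starting at first_multi;
 * uni_overflow/multi_overflow record that one half did not fit in the
 * MAC_TABLE_ENTRIES slots, which receive_filter() treats as "allow all"
 * for that half.
 */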

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = virtio_lduw_p(vdev, &vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement *elem;
    size_t s;
    struct iovec *iov, *iov2;
    unsigned int iov_cnt;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
            virtio_error(vdev, "virtio-net ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        iov_cnt = elem->out_num;
        iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, elem, sizeof(status));
        virtio_notify(vdev, vq);
        g_free(iov2);
        g_free(elem);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);

        if (n->needs_vnet_hdr_swap) {
            virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        }
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = lduw_be_p(ptr + 14) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}
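
/*
 * For illustration, the decision order above is: promiscuous mode
 * short-circuits everything; a VLAN-tagged frame (TPID 0x8100 at offset
 * 12) is dropped unless its 12-bit id is set in n->vlans; then the
 * multicast/unicast split on the group bit (ptr[0] & 1) applies the mode
 * flags, the overflow flags and finally the exact-match MAC table.
 */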

static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
                                      size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement *elem;
        int len, total;
        const struct iovec *sg;

        total = 0;

        elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            if (i) {
                virtio_error(vdev, "virtio-net unexpected empty queue: "
                             "i %zd mergeable %d offset %zd, size %zd, "
                             "guest hdr len %zd, host hdr len %zd "
                             "guest features 0x%" PRIx64,
                             i, n->mergeable_rx_bufs, offset, size,
                             n->guest_hdr_len, n->host_hdr_len,
                             vdev->guest_features);
            }
            return -1;
        }

        if (elem->in_num < 1) {
            virtio_error(vdev,
                         "virtio-net receive queue contains no in buffers");
            virtqueue_detach_element(q->rx_vq, elem, 0);
            g_free(elem);
            return -1;
        }

        sg = elem->in_sg;
        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem->in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem->in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem->in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
            virtqueue_unpop(q->rx_vq, elem, total);
            g_free(elem);
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, elem, total, i++);
        g_free(elem);
    }

    if (mhdr_cnt) {
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}
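
/*
 * Note on the mergeable path above (for illustration): the iovec slice
 * covering mhdr.num_buffers is captured with iov_copy() before the first
 * buffer is filled, because the final buffer count 'i' is only known
 * once the whole packet has been scattered; it is patched in just before
 * virtqueue_flush().
 */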

static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    ssize_t r;

    rcu_read_lock();
    r = virtio_net_receive_rcu(nc, buf, size);
    rcu_read_unlock();
    return r;
}

static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    g_free(q->async_tx.elem);
    q->async_tx.elem = NULL;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

/* TX */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement *elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    if (q->async_tx.elem) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    for (;;) {
        ssize_t ret;
        unsigned int out_num;
        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
        struct virtio_net_hdr_mrg_rxbuf mhdr;

        elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        out_num = elem->out_num;
        out_sg = elem->out_sg;
        if (out_num < 1) {
            virtio_error(vdev, "virtio-net header not in first element");
            virtqueue_detach_element(q->tx_vq, elem, 0);
            g_free(elem);
            return -EINVAL;
        }

        if (n->has_vnet_hdr) {
            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
                n->guest_hdr_len) {
                virtio_error(vdev, "virtio-net header incorrect");
                virtqueue_detach_element(q->tx_vq, elem, 0);
                g_free(elem);
                return -EINVAL;
            }
            if (n->needs_vnet_hdr_swap) {
                virtio_net_hdr_swap(vdev, (void *) &mhdr);
                sg2[0].iov_base = &mhdr;
                sg2[0].iov_len = n->guest_hdr_len;
                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
                                   out_sg, out_num,
                                   n->guest_hdr_len, -1);
                if (out_num == VIRTQUEUE_MAX_SIZE) {
                    goto drop;
                }
                out_num += 1;
                out_sg = sg2;
            }
        }
        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            return -EBUSY;
        }

drop:
        virtqueue_push(q->tx_vq, elem, 0);
        virtio_notify(vdev, q->tx_vq);
        g_free(elem);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}

static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        if (virtio_net_flush_tx(q) == -EINVAL) {
            return;
        }
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready anymore */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready anymore */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY || ret == -EINVAL) {
        return; /* Notification re-enable handled by tx_complete or device
                 * broken */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    ret = virtio_net_flush_tx(q);
    if (ret == -EINVAL) {
        return;
    } else if (ret > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}

static void virtio_net_add_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
                                           virtio_net_handle_rx);

    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[index].tx_vq =
            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
                             virtio_net_handle_tx_timer);
        n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[index]);
    } else {
        n->vqs[index].tx_vq =
            virtio_add_queue(vdev, n->net_conf.tx_queue_size,
                             virtio_net_handle_tx_bh);
        n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
    }

    n->vqs[index].tx_waiting = 0;
    n->vqs[index].n = n;
}

static void virtio_net_del_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = &n->vqs[index];
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    qemu_purge_queued_packets(nc);

    virtio_del_queue(vdev, index * 2);
    if (q->tx_timer) {
        timer_del(q->tx_timer);
        timer_free(q->tx_timer);
        q->tx_timer = NULL;
    } else {
        qemu_bh_delete(q->tx_bh);
        q->tx_bh = NULL;
    }
    q->tx_waiting = 0;
    virtio_del_queue(vdev, index * 2 + 1);
}

static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int old_num_queues = virtio_get_num_queues(vdev);
    int new_num_queues = new_max_queues * 2 + 1;
    int i;

    assert(old_num_queues >= 3);
    assert(old_num_queues % 2 == 1);

    if (old_num_queues == new_num_queues) {
        return;
    }

    /*
     * We always need to remove and add ctrl vq if
     * old_num_queues != new_num_queues. Remove ctrl_vq first,
     * and then we only enter one of the following two loops.
     */
    virtio_del_queue(vdev, old_num_queues - 1);

    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
        /* new_num_queues < old_num_queues */
        virtio_net_del_queue(n, i / 2);
    }

    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
        /* new_num_queues > old_num_queues */
        virtio_net_add_queue(n, i / 2);
    }

    /* add ctrl_vq last */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
}
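
/*
 * Worked example (for illustration): going from max_queues == 1 to 2,
 * old_num_queues == 3 (rx0, tx0, ctrl) and new_num_queues == 5; the ctrl
 * vq at index 2 is deleted, pair 1 (vq indices 2 and 3) is added, and
 * ctrl comes back at index 4, preserving the rx/tx/.../ctrl layout.
 */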

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    int max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;
    virtio_net_change_num_queues(n, max);

    virtio_net_set_queues(n);
}

static int virtio_net_post_load_device(void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, link_down;

    virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
                               virtio_vdev_has_feature(vdev,
                                                       VIRTIO_F_VERSION_1));

    /* MAC_TABLE_ENTRIES may be different from the saved image */
    if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
        n->mac_table.in_use = 0;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        n->announce_counter = SELF_ANNOUNCE_ROUNDS;
        timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
    }

    return 0;
}

/* tx_waiting field of a VirtIONetQueue */
static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
    .name   = "virtio-net-queue-tx_waiting",
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
        VMSTATE_END_OF_LIST()
    },
};

static bool max_queues_gt_1(void *opaque, int version_id)
{
    return VIRTIO_NET(opaque)->max_queues > 1;
}

static bool has_ctrl_guest_offloads(void *opaque, int version_id)
{
    return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
                                   VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
}

static bool mac_table_fits(void *opaque, int version_id)
{
    return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
}

static bool mac_table_doesnt_fit(void *opaque, int version_id)
{
    return !mac_table_fits(opaque, version_id);
}

/* This temporary type is shared by all the WITH_TMP methods
 * although only some fields are used by each.
 */
struct VirtIONetMigTmp {
    VirtIONet      *parent;
    VirtIONetQueue *vqs_1;
    uint16_t        curr_queues_1;
    uint8_t         has_ufo;
    uint32_t        has_vnet_hdr;
};

/* The 2nd and subsequent tx_waiting flags are loaded later than
 * the 1st entry in the queues and only if there's more than one
 * entry.  We use the tmp mechanism to calculate a temporary
 * pointer and count and also validate the count.
 */
static int virtio_net_tx_waiting_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->vqs_1 = tmp->parent->vqs + 1;
    tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
    if (tmp->parent->curr_queues == 0) {
        tmp->curr_queues_1 = 0;
    }

    return 0;
}

static int virtio_net_tx_waiting_pre_load(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    /* Reuse the pointer setup from save */
    virtio_net_tx_waiting_pre_save(opaque);

    if (tmp->parent->curr_queues > tmp->parent->max_queues) {
        error_report("virtio-net: curr_queues %x > max_queues %x",
                     tmp->parent->curr_queues, tmp->parent->max_queues);

        return -EINVAL;
    }

    return 0; /* all good */
}

static const VMStateDescription vmstate_virtio_net_tx_waiting = {
    .name      = "virtio-net-tx_waiting",
    .pre_load  = virtio_net_tx_waiting_pre_load,
    .pre_save  = virtio_net_tx_waiting_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
                                     curr_queues_1,
                                     vmstate_virtio_net_queue_tx_waiting,
                                     struct VirtIONetQueue),
        VMSTATE_END_OF_LIST()
    },
};

/* the 'has_ufo' flag is just tested; if the incoming stream has the
 * flag set we need to check that we have it
 */
static int virtio_net_ufo_post_load(void *opaque, int version_id)
{
    struct VirtIONetMigTmp *tmp = opaque;

    if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
        error_report("virtio-net: saved image requires TUN_F_UFO support");
        return -EINVAL;
    }

    return 0;
}

static int virtio_net_ufo_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->has_ufo = tmp->parent->has_ufo;

    return 0;
}

static const VMStateDescription vmstate_virtio_net_has_ufo = {
    .name      = "virtio-net-ufo",
    .post_load = virtio_net_ufo_post_load,
    .pre_save  = virtio_net_ufo_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
        VMSTATE_END_OF_LIST()
    },
};

/* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
 * flag set we need to check that we have it
 */
static int virtio_net_vnet_post_load(void *opaque, int version_id)
{
    struct VirtIONetMigTmp *tmp = opaque;

    if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
        error_report("virtio-net: saved image requires vnet_hdr=on");
        return -EINVAL;
    }

    return 0;
}

static int virtio_net_vnet_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;

    return 0;
}

static const VMStateDescription vmstate_virtio_net_has_vnet = {
    .name      = "virtio-net-vnet",
    .post_load = virtio_net_vnet_post_load,
    .pre_save  = virtio_net_vnet_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_net_device = {
    .name = "virtio-net-device",
    .version_id = VIRTIO_NET_VM_VERSION,
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .post_load = virtio_net_post_load_device,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
        VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
                               vmstate_virtio_net_queue_tx_waiting,
                               VirtIONetQueue),
        VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
        VMSTATE_UINT16(status, VirtIONet),
        VMSTATE_UINT8(promisc, VirtIONet),
        VMSTATE_UINT8(allmulti, VirtIONet),
        VMSTATE_UINT32(mac_table.in_use, VirtIONet),

        /* Guarded pair: If it fits we load it, else we throw it away
         * - can happen if source has a larger MAC table; post-load
         * sets flags in this case.
         */
        VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
                                 0, mac_table_fits, mac_table.in_use,
                                 ETH_ALEN),
        VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
                                     mac_table.in_use, ETH_ALEN),

        /* Note: This is an array of uint32's that's always been saved as a
         * buffer; hold onto your endiannesses; it's actually used as a bitmap
         * but based on the uint.
         */
        VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_has_vnet),
        VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
        VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
        VMSTATE_UINT8(alluni, VirtIONet),
        VMSTATE_UINT8(nomulti, VirtIONet),
        VMSTATE_UINT8(nouni, VirtIONet),
        VMSTATE_UINT8(nobcast, VirtIONet),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_has_ufo),
        VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
                            vmstate_info_uint16_equal, uint16_t),
        VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_tx_waiting),
        VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
                            has_ctrl_guest_offloads),
        VMSTATE_END_OF_LIST()
    },
};

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
{
    int i, config_size = 0;
    virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);

    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}
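
/*
 * Worked example (for illustration): with only VIRTIO_NET_F_MAC and
 * VIRTIO_NET_F_STATUS offered, config_size becomes
 * endof(struct virtio_net_config, status) == 8; adding VIRTIO_NET_F_MQ
 * grows it to endof(..., max_virtqueue_pairs) == 10, and
 * VIRTIO_NET_F_MTU to endof(..., mtu) == 12.
 */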

void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, the netclient name will be type.x.
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}

static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    if (n->net_conf.mtu) {
        n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
    }

    virtio_net_set_config_size(n, n->host_features);
    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    /*
     * We set a lower limit on RX queue size to what it always was.
     * Guests that want a smaller ring can always resize it without
     * help from us (using virtio 1 and up).
     */
    if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
        n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
        !is_power_of_2(n->net_conf.rx_queue_size)) {
        error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d.",
                   n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE ||
        n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE ||
        !is_power_of_2(n->net_conf.tx_queue_size)) {
        error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d",
                   n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
        virtio_cleanup(vdev);
        return;
    }
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->curr_queues = 1;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    n->net_conf.tx_queue_size = MIN(virtio_net_max_tx_queue_size(n),
                                    n->net_conf.tx_queue_size);

    for (i = 0; i < n->max_queues; i++) {
        virtio_net_add_queue(n, i);
    }

    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
}

static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i, max_queues;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    max_queues = n->multiqueue ? n->max_queues : 1;
    for (i = 0; i < max_queues; i++) {
        virtio_net_del_queue(n, i);
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
    device_add_bootindex_property(obj, &n->nic_conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}

static int virtio_net_pre_save(void *opaque)
{
    VirtIONet *n = opaque;

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);

    return 0;
}

static const VMStateDescription vmstate_virtio_net = {
    .name = "virtio-net",
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .version_id = VIRTIO_NET_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
    .pre_save = virtio_net_pre_save,
};

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
    DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_CSUM, true),
    DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
    DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO4, true),
    DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO6, true),
    DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ECN, true),
    DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_UFO, true),
    DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ANNOUNCE, true),
    DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO4, true),
    DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO6, true),
    DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_ECN, true),
    DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_UFO, true),
    DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features,
                    VIRTIO_NET_F_MRG_RXBUF, true),
    DEFINE_PROP_BIT("status", VirtIONet, host_features,
                    VIRTIO_NET_F_STATUS, true),
    DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VQ, true),
    DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX, true),
    DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VLAN, true),
    DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX_EXTRA, true),
    DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_MAC_ADDR, true),
    DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
    DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                       VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size,
                       VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
    DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
                     true),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    dc->vmsd = &vmstate_virtio_net;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
    vdc->vmsd = &vmstate_virtio_net_device;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)