/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "qapi-event.h"
#include "hw/virtio/virtio-access.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/* previously fixed value */
#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
/* for now, only allow larger queues; with virtio-1, guest can downsize */
#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))

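/*
 * Worked example (illustrative only): with the canonical layout of
 * struct virtio_net_config starting "uint8_t mac[6]; uint16_t status;",
 * endof(struct virtio_net_config, mac) evaluates to 6 and
 * endof(struct virtio_net_config, status) to 8, so a device offering only
 * VIRTIO_NET_F_MAC exposes just the first 6 bytes of config space.
 */
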
typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {.flags = 1 << VIRTIO_NET_F_MTU,
     .end = endof(struct virtio_net_config, mtu)},
    {}
};

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

static int vq2q(int queue_index)
{
    return queue_index / 2;
}

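/*
 * Virtqueue numbering sketch: queue pair i uses virtqueue 2*i for RX and
 * 2*i + 1 for TX, with the control queue added last.  So vq2q() maps both
 * virtqueue 0 (rx0) and virtqueue 1 (tx0) to queue pair 0, virtqueues 2
 * and 3 to pair 1, and so on.
 */
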
/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
    virtio_stw_p(vdev, &netcfg.mtu, n->net_conf.mtu);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}

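/*
 * Self-announce sketch: after migration, post-load arms announce_timer with
 * announce_counter = SELF_ANNOUNCE_ROUNDS.  Each timer tick raises
 * VIRTIO_NET_S_ANNOUNCE in the config space; a driver that ACKs via
 * VIRTIO_NET_CTRL_ANNOUNCE_ACK re-arms the timer (see
 * virtio_net_handle_announce()) until the counter reaches zero.
 */
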
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (n->needs_vnet_hdr_swap) {
            error_report("backend does not support %s vnet headers; "
                         "falling back on userspace virtio",
                         virtio_is_big_endian(vdev) ? "BE" : "LE");
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0;  i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        if (virtio_has_feature(vdev->guest_features, VIRTIO_NET_F_MTU)) {
            r = vhost_net_set_mtu(get_vhost_net(nc->peer), n->net_conf.mtu);
            if (r < 0) {
                error_report("%uBytes MTU not supported by the backend",
                             n->net_conf.mtu);

                return;
            }
        }

        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
                                          NetClientState *peer,
                                          bool enable)
{
    if (virtio_is_big_endian(vdev)) {
        return qemu_set_vnet_be(peer, enable);
    } else {
        return qemu_set_vnet_le(peer, enable);
    }
}

static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
                                       int queues, bool enable)
{
    int i;

    for (i = 0; i < queues; i++) {
        if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
            enable) {
            while (--i >= 0) {
                virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
            }

            return true;
        }
    }

    return false;
}

static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (virtio_net_started(n, status)) {
        /* Before using the device, we tell the network backend about the
         * endianness to use when parsing vnet headers. If the backend
         * can't do it, we fallback onto fixing the headers in the core
         * virtio-net code.
         */
        n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
                                                            queues, true);
    } else if (virtio_net_started(n, vdev->status)) {
        /* After using the device, we need to reset the network backend to
         * the default (guest native endianness), otherwise the guest may
         * lose network connectivity if it is rebooted into a different
         * endianness.
         */
        virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
    }
}

static void virtio_net_drop_tx_queue_data(VirtIODevice *vdev, VirtQueue *vq)
{
    unsigned int dropped = virtqueue_drop_all(vq);
    if (dropped) {
        virtio_notify(vdev, vq);
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vnet_endian_status(n, status);
    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
        bool queue_started;
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }
        queue_started =
            virtio_net_started(n, queue_status) && !n->vhost_started;

        if (queue_started) {
            qemu_flush_queued_packets(ncs);
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (queue_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
            if ((n->status & VIRTIO_NET_S_LINK_UP) == 0 &&
                (queue_status & VIRTIO_CONFIG_S_DRIVER_OK)) {
                /* if tx is waiting, we likely have some packets in the tx
                 * queue and notification disabled */
                q->tx_waiting = 0;
                virtio_queue_set_notification(q->tx_vq, 1);
                virtio_net_drop_tx_queue_data(vdev, q->tx_vq);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path,
                                              &error_abort);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}

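/*
 * VLAN bitmap math (illustrative): n->vlans is an array of 32-bit words
 * covering MAX_VLAN (4096) IDs, i.e. MAX_VLAN >> 5 == 128 words and
 * MAX_VLAN >> 3 == 512 bytes.  VLAN ID 70 lives at word 70 >> 5 == 2,
 * bit 70 & 0x1f == 6; the loop above inverts that mapping with
 * (i << 5) + j.
 */
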
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = qemu_mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;   /* for compatibility */
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
                                       int version_1)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    if (version_1) {
        n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
    } else {
        n->guest_hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);
    }

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 1);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    if (n->max_queues == 1) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 0);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    if (n->nic->peer_deleted) {
        return;
    }

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    /* Firstly sync all virtio-net possible supported features */
    features |= n->host_features;

    virtio_add_feature(&features, VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }
    features = vhost_net_get_features(get_vhost_net(nc->peer), features);
    vdev->backend_features = features;

    if (n->mtu_bypass_backend &&
            (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) {
        features |= (1ULL << VIRTIO_NET_F_MTU);
    }

    return features;
}

static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint64_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
    virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
                     !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    if (n->mtu_bypass_backend &&
            !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) {
        features &= ~(1ULL << VIRTIO_NET_F_MTU);
    }

    virtio_net_set_multiqueue(n,
                              virtio_has_feature(features, VIRTIO_NET_F_MQ));

    virtio_net_set_mrg_rx_bufs(n,
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_MRG_RXBUF),
                               virtio_has_feature(features,
                                                  VIRTIO_F_VERSION_1));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}

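/*
 * VIRTIO_NET_CTRL_MAC_TABLE_SET payload sketch (per the virtio spec): two
 * virtio_net_ctrl_mac tables back to back, unicast first:
 *
 *   le32 entries; u8 macs[entries][ETH_ALEN];   (unicast)
 *   le32 entries; u8 macs[entries][ETH_ALEN];   (multicast)
 *
 * The parsing above consumes them with iov_discard_front(); a table larger
 * than MAC_TABLE_ENTRIES just sets the matching overflow flag rather than
 * failing the command.
 */
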
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = virtio_lduw_p(vdev, &vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

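/*
 * Payload sketch: struct virtio_net_ctrl_mq is a single le16,
 * virtqueue_pairs.  E.g. a guest enabling 4 queue pairs sends
 * { .virtqueue_pairs = 4 }; the checks above reject values outside
 * [VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX] or
 * beyond what the device was created with.
 */
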
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement *elem;
    size_t s;
    struct iovec *iov, *iov2;
    unsigned int iov_cnt;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
            virtio_error(vdev, "virtio-net ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        iov_cnt = elem->out_num;
        iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, elem, sizeof(status));
        virtio_notify(vdev, vq);
        g_free(iov2);
        g_free(elem);
    }
}

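/*
 * Control request framing (per the virtio spec): each element carries a
 * 2-byte header { u8 class; u8 cmd; } in the out sg, followed by the
 * command payload, and a 1-byte virtio_net_ctrl_ack in the in sg that the
 * device fills with VIRTIO_NET_OK or VIRTIO_NET_ERR, as done above.
 */
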
static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);

        if (n->needs_vnet_hdr_swap) {
            virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        }
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = lduw_be_p(ptr + 14) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

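/*
 * Filtering sketch: with an empty MAC table and default flags, a frame to
 * the NIC's own MAC or to ff:ff:ff:ff:ff:ff passes, while other unicast or
 * multicast destinations are dropped unless alluni/allmulti or the
 * corresponding overflow flag force-accepts them.
 */
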
static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf,
                                      size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement *elem;
        int len, total;
        const struct iovec *sg;

        total = 0;

        elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            if (i) {
                virtio_error(vdev, "virtio-net unexpected empty queue: "
                             "i %zd mergeable %d offset %zd, size %zd, "
                             "guest hdr len %zd, host hdr len %zd "
                             "guest features 0x%" PRIx64,
                             i, n->mergeable_rx_bufs, offset, size,
                             n->guest_hdr_len, n->host_hdr_len,
                             vdev->guest_features);
            }
            return -1;
        }

        if (elem->in_num < 1) {
            virtio_error(vdev,
                         "virtio-net receive queue contains no in buffers");
            virtqueue_detach_element(q->rx_vq, elem, 0);
            g_free(elem);
            return -1;
        }

        sg = elem->in_sg;
        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem->in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem->in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem->in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
            virtqueue_unpop(q->rx_vq, elem, total);
            g_free(elem);
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, elem, total, i++);
        g_free(elem);
    }

    if (mhdr_cnt) {
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}

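/*
 * Mergeable RX note: num_buffers is only known once the whole packet has
 * been scattered across descriptors, so the header copy above snapshots
 * the iovec covering that field (mhdr_sg) and patches it at the end.
 * E.g. (illustrative sizes) a 3000-byte packet over 1500-byte buffers
 * fills two elements and then stores num_buffers = 2 into the first one.
 */
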
static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    ssize_t r;

    rcu_read_lock();
    r = virtio_net_receive_rcu(nc, buf, size);
    rcu_read_unlock();
    return r;
}

static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    g_free(q->async_tx.elem);
    q->async_tx.elem = NULL;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement *elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    if (q->async_tx.elem) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    for (;;) {
        ssize_t ret;
        unsigned int out_num;
        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
        struct virtio_net_hdr_mrg_rxbuf mhdr;

        elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        out_num = elem->out_num;
        out_sg = elem->out_sg;
        if (out_num < 1) {
            virtio_error(vdev, "virtio-net header not in first element");
            virtqueue_detach_element(q->tx_vq, elem, 0);
            g_free(elem);
            return -EINVAL;
        }

        if (n->has_vnet_hdr) {
            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
                n->guest_hdr_len) {
                virtio_error(vdev, "virtio-net header incorrect");
                virtqueue_detach_element(q->tx_vq, elem, 0);
                g_free(elem);
                return -EINVAL;
            }
            if (n->needs_vnet_hdr_swap) {
                virtio_net_hdr_swap(vdev, (void *) &mhdr);
                sg2[0].iov_base = &mhdr;
                sg2[0].iov_len = n->guest_hdr_len;
                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
                                   out_sg, out_num,
                                   n->guest_hdr_len, -1);
                if (out_num == VIRTQUEUE_MAX_SIZE) {
                    goto drop;
                }
                out_num += 1;
                out_sg = sg2;
            }
        }
        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            return -EBUSY;
        }

drop:
        virtqueue_push(q->tx_vq, elem, 0);
        virtio_notify(vdev, q->tx_vq);
        g_free(elem);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}

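/*
 * TX path note: when the guest is cross-endian, the virtio-net header
 * cannot be byteswapped in guest memory, so the flush above copies it into
 * a local mhdr, swaps that, and splices it in front of the remaining
 * payload iovecs via sg2[] before handing the chain to the backend.
 */
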
static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        if (virtio_net_flush_tx(q) == -EINVAL) {
            return;
        }
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) {
        virtio_net_drop_tx_queue_data(vdev, vq);
        return;
    }

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY || ret == -EINVAL) {
        return; /* Notification re-enable handled by tx_complete or device
                 * broken */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    ret = virtio_net_flush_tx(q);
    if (ret == -EINVAL) {
        return;
    } else if (ret > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}

*n
, int index
)
1506 VirtIODevice
*vdev
= VIRTIO_DEVICE(n
);
1508 n
->vqs
[index
].rx_vq
= virtio_add_queue(vdev
, n
->net_conf
.rx_queue_size
,
1509 virtio_net_handle_rx
);
1510 if (n
->net_conf
.tx
&& !strcmp(n
->net_conf
.tx
, "timer")) {
1511 n
->vqs
[index
].tx_vq
=
1512 virtio_add_queue(vdev
, 256, virtio_net_handle_tx_timer
);
1513 n
->vqs
[index
].tx_timer
= timer_new_ns(QEMU_CLOCK_VIRTUAL
,
1514 virtio_net_tx_timer
,
1517 n
->vqs
[index
].tx_vq
=
1518 virtio_add_queue(vdev
, 256, virtio_net_handle_tx_bh
);
1519 n
->vqs
[index
].tx_bh
= qemu_bh_new(virtio_net_tx_bh
, &n
->vqs
[index
]);
1522 n
->vqs
[index
].tx_waiting
= 0;
1523 n
->vqs
[index
].n
= n
;
static void virtio_net_del_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = &n->vqs[index];
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    qemu_purge_queued_packets(nc);

    virtio_del_queue(vdev, index * 2);
    if (q->tx_timer) {
        timer_del(q->tx_timer);
        timer_free(q->tx_timer);
        q->tx_timer = NULL;
    } else {
        qemu_bh_delete(q->tx_bh);
        q->tx_bh = NULL;
    }
    q->tx_waiting = 0;
    virtio_del_queue(vdev, index * 2 + 1);
}

static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int old_num_queues = virtio_get_num_queues(vdev);
    int new_num_queues = new_max_queues * 2 + 1;
    int i;

    assert(old_num_queues >= 3);
    assert(old_num_queues % 2 == 1);

    if (old_num_queues == new_num_queues) {
        return;
    }

    /*
     * We always need to remove and add ctrl vq if
     * old_num_queues != new_num_queues. Remove ctrl_vq first,
     * and then we only enter one of the following two loops.
     */
    virtio_del_queue(vdev, old_num_queues - 1);

    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
        /* new_num_queues < old_num_queues */
        virtio_net_del_queue(n, i / 2);
    }

    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
        /* new_num_queues > old_num_queues */
        virtio_net_add_queue(n, i / 2);
    }

    /* add ctrl_vq last */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
}

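/*
 * Renumbering example (illustrative): going from 2 queue pairs
 * (vqs 0..3 + ctrl at 4) to 1 pair first deletes the ctrl vq, then deletes
 * pair 1 (vqs 2 and 3), and finally re-adds ctrl as vq 2, keeping the
 * "pairs then ctrl" layout contiguous.
 */
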
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    int max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;
    virtio_net_change_num_queues(n, max);

    virtio_net_set_queues(n);
}

static int virtio_net_post_load_device(void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, link_down;

    virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs,
                               virtio_vdev_has_feature(vdev,
                                                       VIRTIO_F_VERSION_1));

    /* MAC_TABLE_ENTRIES may be different from the saved image */
    if (n->mac_table.in_use > MAC_TABLE_ENTRIES) {
        n->mac_table.in_use = 0;
    }

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        n->announce_counter = SELF_ANNOUNCE_ROUNDS;
        timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
    }

    return 0;
}

/* tx_waiting field of a VirtIONetQueue */
static const VMStateDescription vmstate_virtio_net_queue_tx_waiting = {
    .name   = "virtio-net-queue-tx_waiting",
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(tx_waiting, VirtIONetQueue),
        VMSTATE_END_OF_LIST()
   },
};

static bool max_queues_gt_1(void *opaque, int version_id)
{
    return VIRTIO_NET(opaque)->max_queues > 1;
}

static bool has_ctrl_guest_offloads(void *opaque, int version_id)
{
    return virtio_vdev_has_feature(VIRTIO_DEVICE(opaque),
                                   VIRTIO_NET_F_CTRL_GUEST_OFFLOADS);
}

static bool mac_table_fits(void *opaque, int version_id)
{
    return VIRTIO_NET(opaque)->mac_table.in_use <= MAC_TABLE_ENTRIES;
}

static bool mac_table_doesnt_fit(void *opaque, int version_id)
{
    return !mac_table_fits(opaque, version_id);
}

/* This temporary type is shared by all the WITH_TMP methods
 * although only some fields are used by each.
 */
struct VirtIONetMigTmp {
    VirtIONet      *parent;
    VirtIONetQueue *vqs_1;
    uint16_t        curr_queues_1;
    uint8_t         has_ufo;
    uint32_t        has_vnet_hdr;
};

/* The 2nd and subsequent tx_waiting flags are loaded later than
 * the 1st entry in the queues and only if there's more than one
 * entry.  We use the tmp mechanism to calculate a temporary
 * pointer and count and also validate the count.
 */

static void virtio_net_tx_waiting_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->vqs_1 = tmp->parent->vqs + 1;
    tmp->curr_queues_1 = tmp->parent->curr_queues - 1;
    if (tmp->parent->curr_queues == 0) {
        tmp->curr_queues_1 = 0;
    }
}

static int virtio_net_tx_waiting_pre_load(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    /* Reuse the pointer setup from save */
    virtio_net_tx_waiting_pre_save(opaque);

    if (tmp->parent->curr_queues > tmp->parent->max_queues) {
        error_report("virtio-net: curr_queues %x > max_queues %x",
            tmp->parent->curr_queues, tmp->parent->max_queues);

        return -EINVAL;
    }

    return 0; /* all good */
}

static const VMStateDescription vmstate_virtio_net_tx_waiting = {
    .name      = "virtio-net-tx_waiting",
    .pre_load  = virtio_net_tx_waiting_pre_load,
    .pre_save  = virtio_net_tx_waiting_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
                                     curr_queues_1,
                                     vmstate_virtio_net_queue_tx_waiting,
                                     struct VirtIONetQueue),
        VMSTATE_END_OF_LIST()
    },
};

/* the 'has_ufo' flag is just tested; if the incoming stream has the
 * flag set we need to check that we have it
 */
static int virtio_net_ufo_post_load(void *opaque, int version_id)
{
    struct VirtIONetMigTmp *tmp = opaque;

    if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
        error_report("virtio-net: saved image requires TUN_F_UFO support");
        return -EINVAL;
    }

    return 0;
}

static void virtio_net_ufo_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->has_ufo = tmp->parent->has_ufo;
}

static const VMStateDescription vmstate_virtio_net_has_ufo = {
    .name      = "virtio-net-ufo",
    .post_load = virtio_net_ufo_post_load,
    .pre_save  = virtio_net_ufo_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
        VMSTATE_END_OF_LIST()
    },
};

/* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
 * flag set we need to check that we have it
 */
static int virtio_net_vnet_post_load(void *opaque, int version_id)
{
    struct VirtIONetMigTmp *tmp = opaque;

    if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
        error_report("virtio-net: saved image requires vnet_hdr=on");
        return -EINVAL;
    }

    return 0;
}

static void virtio_net_vnet_pre_save(void *opaque)
{
    struct VirtIONetMigTmp *tmp = opaque;

    tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
}

static const VMStateDescription vmstate_virtio_net_has_vnet = {
    .name      = "virtio-net-vnet",
    .post_load = virtio_net_vnet_post_load,
    .pre_save  = virtio_net_vnet_pre_save,
    .fields    = (VMStateField[]) {
        VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_net_device = {
    .name = "virtio-net-device",
    .version_id = VIRTIO_NET_VM_VERSION,
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .post_load = virtio_net_post_load_device,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
        VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
                               vmstate_virtio_net_queue_tx_waiting,
                               VirtIONetQueue),
        VMSTATE_UINT32(mergeable_rx_bufs, VirtIONet),
        VMSTATE_UINT16(status, VirtIONet),
        VMSTATE_UINT8(promisc, VirtIONet),
        VMSTATE_UINT8(allmulti, VirtIONet),
        VMSTATE_UINT32(mac_table.in_use, VirtIONet),

        /* Guarded pair: If it fits we load it, else we throw it away
         * - can happen if source has a larger MAC table; post-load
         * sets flags in this case.
         */
        VMSTATE_VBUFFER_MULTIPLY(mac_table.macs, VirtIONet,
                                 0, mac_table_fits, mac_table.in_use,
                                 ETH_ALEN),
        VMSTATE_UNUSED_VARRAY_UINT32(VirtIONet, mac_table_doesnt_fit, 0,
                                     mac_table.in_use, ETH_ALEN),

        /* Note: This is an array of uint32's that's always been saved as a
         * buffer; hold onto your endiannesses; it's actually used as a bitmap
         * but based on the uint.
         */
        VMSTATE_BUFFER_POINTER_UNSAFE(vlans, VirtIONet, 0, MAX_VLAN >> 3),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_has_vnet),
        VMSTATE_UINT8(mac_table.multi_overflow, VirtIONet),
        VMSTATE_UINT8(mac_table.uni_overflow, VirtIONet),
        VMSTATE_UINT8(alluni, VirtIONet),
        VMSTATE_UINT8(nomulti, VirtIONet),
        VMSTATE_UINT8(nouni, VirtIONet),
        VMSTATE_UINT8(nobcast, VirtIONet),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_has_ufo),
        VMSTATE_SINGLE_TEST(max_queues, VirtIONet, max_queues_gt_1, 0,
                            vmstate_info_uint16_equal, uint16_t),
        VMSTATE_UINT16_TEST(curr_queues, VirtIONet, max_queues_gt_1),
        VMSTATE_WITH_TMP(VirtIONet, struct VirtIONetMigTmp,
                         vmstate_virtio_net_tx_waiting),
        VMSTATE_UINT64_TEST(curr_guest_offloads, VirtIONet,
                            has_ctrl_guest_offloads),
        VMSTATE_END_OF_LIST()
   },
};

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
{
    int i, config_size = 0;
    virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);

    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}

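/*
 * Example (illustrative): with VIRTIO_NET_F_STATUS offered but not
 * VIRTIO_NET_F_MQ or VIRTIO_NET_F_MTU, the loop above picks
 * endof(struct virtio_net_config, status), so config_size covers mac[]
 * plus status and nothing beyond it.
 */
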
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, the netclient name will be type.x.
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}

static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    if (n->net_conf.mtu) {
        n->host_features |= (0x1 << VIRTIO_NET_F_MTU);
    }

    virtio_net_set_config_size(n, n->host_features);
    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    /*
     * We set a lower limit on RX queue size to what it always was.
     * Guests that want a smaller ring can always resize it without
     * help from us (using virtio 1 and up).
     */
    if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
        n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
        (n->net_conf.rx_queue_size & (n->net_conf.rx_queue_size - 1))) {
        error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d.",
                   n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
        virtio_cleanup(vdev);
        return;
    }
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->curr_queues = 1;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    for (i = 0; i < n->max_queues; i++) {
        virtio_net_add_queue(n, i);
    }

    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
}

static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i, max_queues;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    max_queues = n->multiqueue ? n->max_queues : 1;
    for (i = 0; i < max_queues; i++) {
        virtio_net_del_queue(n, i);
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
    device_add_bootindex_property(obj, &n->nic_conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}

static void virtio_net_pre_save(void *opaque)
{
    VirtIONet *n = opaque;

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
}

static const VMStateDescription vmstate_virtio_net = {
    .name = "virtio-net",
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .version_id = VIRTIO_NET_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
    .pre_save = virtio_net_pre_save,
};

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
    DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_CSUM, true),
    DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
    DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO4, true),
    DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO6, true),
    DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ECN, true),
    DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_UFO, true),
    DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ANNOUNCE, true),
    DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO4, true),
    DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO6, true),
    DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_ECN, true),
    DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_UFO, true),
    DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features,
                    VIRTIO_NET_F_MRG_RXBUF, true),
    DEFINE_PROP_BIT("status", VirtIONet, host_features,
                    VIRTIO_NET_F_STATUS, true),
    DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VQ, true),
    DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX, true),
    DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VLAN, true),
    DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX_EXTRA, true),
    DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_MAC_ADDR, true),
    DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
    DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                       VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0),
    DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend,
                     true),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    dc->vmsd = &vmstate_virtio_net;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->legacy_features |= (0x1 << VIRTIO_NET_F_GSO);
    vdc->vmsd = &vmstate_virtio_net_device;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)