/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "qapi-event.h"
#include "hw/virtio/virtio-access.h"
#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */
/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))
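/*
 * For example, endof(struct virtio_net_config, status) is the offset of
 * the 'status' field plus its size: the smallest config space a device
 * must expose for a guest to be able to read 'status'.
 */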
typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;
static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};
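/*
 * feature_sizes drives virtio_net_set_config_size() below: the exposed
 * config space is sized to cover the last field whose feature bit is set
 * in the host features, so a guest never reads past what was offered.
 */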
static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}
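/*
 * Each queue pair occupies two consecutive virtqueues (RX first, then TX),
 * matching the order in which virtio_net_set_multiqueue() adds them, so
 * dividing a virtqueue index by two recovers the queue-pair index.
 */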
static int vq2q(int queue_index)
{
    return queue_index / 2;
}
/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */
static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}
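/*
 * A config-space MAC write is honoured only while the guest has not
 * negotiated VIRTIO_NET_F_CTRL_MAC_ADDR; once that feature is acked, MAC
 * changes must go through the control virtqueue instead (see
 * virtio_net_handle_mac() below).
 */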
static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}
static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}
static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if (!!n->vhost_started ==
        (virtio_net_started(n, status) && !nc->peer->link_down)) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (!vhost_net_query(get_vhost_net(nc->peer), vdev)) {
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0; i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}
static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}
static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path,
                                              &error_abort);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}
static char *mac_strdup_printf(const uint8_t *mac)
{
    return g_strdup_printf("%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", mac[0],
                           mac[1], mac[2], mac[3], mac[4], mac[5]);
}
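/*
 * n->vlans is a MAX_VLAN-bit bitmap stored as 32-bit words: VLAN id 'vid'
 * lives at bit (vid & 0x1f) of word (vid >> 5), so (i << 5) + j below
 * reconstructs the id when walking the table.
 */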
static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!((1 << VIRTIO_NET_F_CTRL_VLAN) & vdev->guest_features)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}
static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}
static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}
static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}
static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}
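/*
 * The guest-visible header is the larger virtio_net_hdr_mrg_rxbuf only
 * when mergeable RX buffers were negotiated.  host_hdr_len tracks
 * guest_hdr_len only when the peer can be told to use that header length;
 * otherwise the TX path copies around the difference on every packet.
 */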
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}
static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}
static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}
static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(get_vhost_net(nc->peer), features);
}
static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}
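/*
 * Offload configuration is pushed to the backend as individual booleans,
 * but curr_guest_offloads is kept as a feature-bit mask so it can be
 * saved, restored and re-applied wholesale after migration.
 */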
static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}
static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}
static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}
static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)));

    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if ((1 << VIRTIO_NET_F_CTRL_VLAN) & features) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}
static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}
static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}
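/*
 * A VIRTIO_NET_CTRL_MAC_TABLE_SET payload is two virtio_net_ctrl_mac
 * blocks back to back, unicast addresses first:
 *
 *     le32 entries; u8 macs[entries][ETH_ALEN];   (unicast)
 *     le32 entries; u8 macs[entries][ETH_ALEN];   (multicast)
 *
 * A list that does not fit in the device table only sets the matching
 * overflow flag, which makes receive_filter() accept that whole class of
 * addresses rather than dropping traffic.
 */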
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);

        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = virtio_lduw_p(vdev, &vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}
static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov = elem.out_sg;
        iov_cnt = elem.out_num;
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}
static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}
static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}
static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}
static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}
/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}
static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}
*n
, const uint8_t *buf
, int size
)
950 static const uint8_t bcast
[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
951 static const uint8_t vlan
[] = {0x81, 0x00};
952 uint8_t *ptr
= (uint8_t *)buf
;
958 ptr
+= n
->host_hdr_len
;
960 if (!memcmp(&ptr
[12], vlan
, sizeof(vlan
))) {
961 int vid
= be16_to_cpup((uint16_t *)(ptr
+ 14)) & 0xfff;
962 if (!(n
->vlans
[vid
>> 5] & (1U << (vid
& 0x1f))))
966 if (ptr
[0] & 1) { // multicast
967 if (!memcmp(ptr
, bcast
, sizeof(bcast
))) {
969 } else if (n
->nomulti
) {
971 } else if (n
->allmulti
|| n
->mac_table
.multi_overflow
) {
975 for (i
= n
->mac_table
.first_multi
; i
< n
->mac_table
.in_use
; i
++) {
976 if (!memcmp(ptr
, &n
->mac_table
.macs
[i
* ETH_ALEN
], ETH_ALEN
)) {
983 } else if (n
->alluni
|| n
->mac_table
.uni_overflow
) {
985 } else if (!memcmp(ptr
, n
->mac
, ETH_ALEN
)) {
989 for (i
= 0; i
< n
->mac_table
.first_multi
; i
++) {
990 if (!memcmp(ptr
, &n
->mac_table
.macs
[i
* ETH_ALEN
], ETH_ALEN
)) {
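/*
 * RX path.  With mergeable buffers a packet may span several descriptor
 * chains: the first chain carries the virtio_net_hdr_mrg_rxbuf header,
 * whose num_buffers field is patched in at the end (through the iovec
 * saved in mhdr_sg) once the final chain count is known.
 */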
static ssize_t
virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len, vdev->guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;

        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
            exit(1);
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}
static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}
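/*
 * Flush up to tx_burst packets from the TX virtqueue.  Returns the number
 * of packets sent, or -EBUSY when the backend cannot take more data; in
 * that case the in-flight element is parked in q->async_tx and completed
 * later by virtio_net_tx_complete() above.
 */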
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    assert(vdev->vm_running);

    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        if (n->has_vnet_hdr) {
            if (out_sg[0].iov_len < n->guest_hdr_len) {
                error_report("virtio-net header incorrect");
                exit(1);
            }
            virtio_net_hdr_swap(vdev, (void *) out_sg[0].iov_base);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len  = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(vdev, q->tx_vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}
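/*
 * Two TX mitigation schemes follow.  The timer flavour delays the flush
 * by tx_timeout ns after the first kick so that further kicks can batch;
 * the bottom-half flavour flushes as soon as the event loop runs and
 * simply reschedules itself while full bursts keep arriving.
 */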
static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}
static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}
static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}
static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;

    for (i = 2; i <= n->max_queues * 2 + 1; i++) {
        virtio_del_queue(vdev, i);
    }

    for (i = 1; i < max; i++) {
        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
        if (n->vqs[i].tx_timer) {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[i]);
        } else {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
        }

        n->vqs[i].tx_waiting = 0;
        n->vqs[i].n = n;
    }

    /* Note: Minix guests (version 3.2.1) use ctrl vq but don't ack
     * VIRTIO_NET_F_CTRL_VQ. Create ctrl vq unconditionally to avoid
     * breaking them.
     */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);

    virtio_net_set_queues(n);
}
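/*
 * Migration.  Device state is streamed out field by field below and read
 * back by virtio_net_load_device(); VIRTIO_NET_VM_VERSION gates which
 * fields a given stream contains, so new fields may only ever be
 * appended.
 */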
static void virtio_net_save(QEMUFile *f, void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(vdev, f);
}
static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        qemu_put_be64(f, n->curr_guest_offloads);
    }
}
static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    return virtio_load(vdev, f, version_id);
}
static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i, link_down;

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else {
            int64_t i;

            /* Overflow detected - can happen if source has a larger MAC table.
             * We simply set overflow flag so there's no need to maintain the
             * table of addresses, discard them all.
             * Note: 64 bit math to avoid integer overflow.
             */
            for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
                qemu_get_byte(f);
            }
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    if (n->max_queues > 1) {
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        if (n->curr_queues > n->max_queues) {
            error_report("virtio-net: curr_queues %x > max_queues %x",
                         n->curr_queues, n->max_queues);
            return -1;
        }
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        n->curr_guest_offloads = qemu_get_be64(f);
    } else {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    if (vdev->guest_features & (0x1 << VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        vdev->guest_features & (0x1 << VIRTIO_NET_F_CTRL_VQ)) {
        n->announce_counter = SELF_ANNOUNCE_ROUNDS;
        timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
    }

    return 0;
}
static void virtio_net_cleanup(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    n->nic = NULL;
}
static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};
static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}
static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                             vdev, idx, mask);
}
void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
{
    int i, config_size = 0;
    host_features |= (1 << VIRTIO_NET_F_MAC);
    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, the netclient name will be type.x.
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}
static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
    n->curr_queues = 1;
    n->vqs[0].n = n;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_timer);
        n->vqs[0].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, virtio_net_tx_timer,
                                          &n->vqs[0]);
    } else {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_bh);
        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
    }
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);

    add_boot_device_path(n->nic_conf.bootindex, dev, "/ethernet-phy@0");
}
static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(dev, "virtio-net", n);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        if (q->tx_timer) {
            timer_del(q->tx_timer);
            timer_free(q->tx_timer);
        } else if (q->tx_bh) {
            qemu_bh_delete(q->tx_bh);
        }
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}
static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
}
static Property virtio_net_properties[] = {
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->load = virtio_net_load_device;
    vdc->save = virtio_net_save_device;
}
static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};
static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)