/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */
#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "monitor/monitor.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */
/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))
typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};
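/* Note: each entry above pairs a host feature bit with the end offset of the
 * last virtio_net_config field that feature exposes to the guest;
 * virtio_net_set_config_size() takes the maximum matching 'end' value to
 * size the device config space. */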
static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}
static int vq2q(int queue_index)
{
    return queue_index / 2;
}
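/* Note: virtqueues are laid out as rx0, tx0, rx1, tx1, ... with the control
 * queue last, so dividing a virtqueue index by two yields the rx/tx queue
 * pair it belongs to (see virtio_net_set_multiqueue() below). */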
/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */
static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    stw_p(&netcfg.status, n->status);
    stw_p(&netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}
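/* Note: the guest may also change the MAC by writing config space.  The
 * check below honours such writes only when VIRTIO_NET_F_CTRL_MAC_ADDR was
 * not negotiated; guests that acked that feature are expected to use the
 * control-queue VIRTIO_NET_CTRL_MAC_ADDR_SET command instead. */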
static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!(vdev->guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}
static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!nc->peer) {
        return;
    }
    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    if (!tap_get_vhost_net(nc->peer)) {
        return;
    }

    if (!!n->vhost_started ==
        (virtio_net_started(n, status) && !nc->peer->link_down)) {
        return;
    }
    if (!n->vhost_started) {
        int r;
        if (!vhost_net_query(tap_get_vhost_net(nc->peer), vdev)) {
            return;
        }
        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}
static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}
static void rxfilter_notify(NetClientState *nc)
{
    QObject *event_data;
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        if (n->netclient_name) {
            event_data = qobject_from_jsonf("{ 'name': %s, 'path': %s }",
                                            n->netclient_name, path);
        } else {
            event_data = qobject_from_jsonf("{ 'path': %s }", path);
        }
        monitor_protocol_event(QEVENT_NIC_RX_FILTER_CHANGED, event_data);
        qobject_decref(event_data);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}
static char *mac_strdup_printf(const uint8_t *mac)
{
    return g_strdup_printf("%.2x:%.2x:%.2x:%.2x:%.2x:%.2x", mac[0],
                           mac[1], mac[2], mac[3], mac[4], mac[5]);
}
static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    RxFilterInfo *info;
    strList *str_list, *entry;
    intList *int_list, *int_entry;
    int i, j;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;

    int_list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j < 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                int_entry = g_malloc0(sizeof(*int_entry));
                int_entry->value = (i << 5) + j;
                int_entry->next = int_list;
                int_list = int_entry;
            }
        }
    }
    info->vlan_table = int_list;

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}
static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;   /* for compatibility */
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}
static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}
static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}
static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}
static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}
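/* Note: guest_hdr_len is the per-packet header the guest expects (the larger
 * virtio_net_hdr_mrg_rxbuf when VIRTIO_NET_F_MRG_RXBUF is negotiated), while
 * host_hdr_len is what the backend produces and consumes.  The two only match
 * when the peer supports the longer header length; otherwise the receive and
 * transmit paths below translate between them. */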
static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}
static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}
static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);
static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return features;
    }
    if (!tap_get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(tap_get_vhost_net(nc->peer), features);
}
static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}
static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}
static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}
static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}
static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)));

    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
            continue;
        }
        if (!tap_get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(tap_get_vhost_net(nc->peer), features);
    }
}
static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}
static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}
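/* Note: a VIRTIO_NET_CTRL_MAC_TABLE_SET command carries two
 * virtio_net_ctrl_mac blocks back to back, unicast entries first and
 * multicast entries second.  Each block is a little-endian entry count
 * followed by that many 6-byte MACs.  If either block exceeds
 * MAC_TABLE_ENTRIES, the matching overflow flag is set and receive_filter()
 * falls back to accepting that whole class of addresses. */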
static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);

        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (in_use + mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}
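/* Note: n->vlans is a MAX_VLAN-bit bitmap stored as 32-bit words, so
 * 'vid >> 5' selects the word and 'vid & 0x1f' the bit within it; the same
 * layout is consulted in receive_filter() when a packet carries an 802.1Q
 * tag. */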
static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = lduw_p(&vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = lduw_p(&mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov = elem.out_sg;
        iov_cnt = elem.out_num;
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}
static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}
static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}
static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}
/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}
static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}
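/* Note on the receive path below: with mergeable rx buffers a packet may be
 * spread over several virtqueue elements.  The header area of the first
 * element is remembered in mhdr_sg so that, once the number of elements used
 * is known, num_buffers can be patched into the guest-visible header before
 * the queue is flushed. */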
static ssize_t
virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len, vdev->guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        stw_p(&mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}
static int32_t virtio_net_flush_tx(VirtIONetQueue *q);
static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    assert(vdev->vm_running);

    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len  = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(vdev, q->tx_vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}
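/* Note: the device offers two transmit strategies, selected with the "tx"
 * property.  tx=timer coalesces guest kicks with a one-shot timer of
 * tx_timeout nanoseconds, while the default tx=bh flushes from a bottom half
 * and reschedules itself as long as full tx_burst batches keep arriving. */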
static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}
static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}
static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}
static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    assert(vdev->vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int i, max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;

    for (i = 2; i <= n->max_queues * 2 + 1; i++) {
        virtio_del_queue(vdev, i);
    }

    for (i = 1; i < max; i++) {
        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
        if (n->vqs[i].tx_timer) {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
            n->vqs[i].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[i]);
        } else {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
        }

        n->vqs[i].tx_waiting = 0;
        n->vqs[i].n = n;
    }

    /* Note: Minix guests (version 3.2.1) use the ctrl vq but don't ack
     * VIRTIO_NET_F_CTRL_VQ. Create the ctrl vq unconditionally to avoid
     * breaking them.
     */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);

    virtio_net_set_queues(n);
}
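/* Migration uses a hand-rolled versioned format (VIRTIO_NET_VM_VERSION,
 * currently 11); virtio_net_load() below accepts versions 2 through 11 and
 * gates each field group on the version_id that introduced it. */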
static void virtio_net_save(QEMUFile *f, void *opaque)
{
    int i;
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(vdev, f);

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        qemu_put_be64(f, n->curr_guest_offloads);
    }
}
static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int ret, i, link_down;

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    ret = virtio_load(vdev, f);
    if (ret) {
        return ret;
    }

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else if (n->mac_table.in_use) {
            uint8_t *buf = g_malloc0(n->mac_table.in_use * ETH_ALEN);
            qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN);
            g_free(buf);
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    if (n->max_queues > 1) {
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    if ((1 << VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) & vdev->guest_features) {
        n->curr_guest_offloads = qemu_get_be64(f);
    } else {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    return 0;
}
static void virtio_net_cleanup(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    n->nic = NULL;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};
static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(tap_get_vhost_net(nc->peer), idx);
}
static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(tap_get_vhost_net(nc->peer),
                             vdev, idx, mask);
}
void virtio_net_set_config_size(VirtIONet *n, uint32_t host_features)
{
    int i, config_size = 0;
    host_features |= (1 << VIRTIO_NET_F_MAC);
    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}
void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, the netclient name will be type.x.
     */
    assert(type != NULL);

    if (n->netclient_name) {
        g_free(n->netclient_name);
        n->netclient_name = NULL;
    }
    if (n->netclient_type) {
        g_free(n->netclient_type);
        n->netclient_type = NULL;
    }

    if (name != NULL) {
        n->netclient_name = g_strdup(name);
    }
    n->netclient_type = g_strdup(type);
}
static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    n->max_queues = MAX(n->nic_conf.queues, 1);
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->vqs[0].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
    n->curr_queues = 1;
    n->vqs[0].n = n;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_timer);
        n->vqs[0].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, virtio_net_tx_timer,
                                          &n->vqs[0]);
    } else {
        n->vqs[0].tx_vq = virtio_add_queue(vdev, 256,
                                           virtio_net_handle_tx_bh);
        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
    }
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);

    add_boot_device_path(n->nic_conf.bootindex, dev, "/ethernet-phy@0");
}
static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(dev, "virtio-net", n);

    if (n->netclient_name) {
        g_free(n->netclient_name);
        n->netclient_name = NULL;
    }
    if (n->netclient_type) {
        g_free(n->netclient_type);
        n->netclient_type = NULL;
    }

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        if (q->tx_timer) {
            timer_del(q->tx_timer);
            timer_free(q->tx_timer);
        } else if (q->tx_bh) {
            qemu_bh_delete(q->tx_bh);
        }
    }

    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}
static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
}
static Property virtio_net_properties[] = {
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_END_OF_LIST(),
};
static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
}
static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};
static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)