/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/iov.h"
#include "hw/virtio/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "hw/virtio/virtio-bus.h"
#include "qapi/qmp/qjson.h"
#include "qapi-event.h"
#include "hw/virtio/virtio-access.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/* previously fixed value */
#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256
/* for now, only allow larger queues; with virtio-1, guest can downsize */
#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))
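
/* For example, endof(struct virtio_net_config, status) is the offset of
 * 'status' plus the size of 'status': the smallest config space in which
 * the 'status' field is fully addressable.  virtio_net_set_config_size()
 * below uses this to grow the config size up to the highest enabled field. */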

typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

static int vq2q(int queue_index)
{
    return queue_index / 2;
}
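
/* Virtqueues are allocated in RX/TX pairs (index 2*i is the RX queue of
 * pair i, 2*i + 1 its TX queue, and the control queue comes last), so
 * halving a virtqueue index in vq2q() yields the owning queue pair. */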

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg;

    virtio_stw_p(vdev, &netcfg.status, n->status);
    virtio_stw_p(vdev, &netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
        !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && vdev->vm_running;
}

static void virtio_net_announce_timer(void *opaque)
{
    VirtIONet *n = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->announce_counter--;
    n->status |= VIRTIO_NET_S_ANNOUNCE;
    virtio_notify_config(vdev);
}
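
/* Self-announce is a handshake: the timer above raises
 * VIRTIO_NET_S_ANNOUNCE and pokes the config space; once the guest has sent
 * its gratuitous packets it acks via the control queue
 * (virtio_net_handle_announce() below), which re-arms the timer until
 * announce_counter runs out. */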

static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!get_vhost_net(nc->peer)) {
        return;
    }

    if ((virtio_net_started(n, status) && !nc->peer->link_down) ==
        !!n->vhost_started) {
        return;
    }
    if (!n->vhost_started) {
        int r, i;

        if (n->needs_vnet_hdr_swap) {
            error_report("backend does not support %s vnet headers; "
                         "falling back on userspace virtio",
                         virtio_is_big_endian(vdev) ? "BE" : "LE");
            return;
        }

        /* Any packets outstanding? Purge them to avoid touching rings
         * when vhost is running.
         */
        for (i = 0; i < queues; i++) {
            NetClientState *qnc = qemu_get_subqueue(n->nic, i);

            /* Purge both directions: TX and RX. */
            qemu_net_queue_purge(qnc->peer->incoming_queue, qnc);
            qemu_net_queue_purge(qnc->incoming_queue, qnc->peer);
        }

        n->vhost_started = 1;
        r = vhost_net_start(vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static int virtio_net_set_vnet_endian_one(VirtIODevice *vdev,
                                          NetClientState *peer,
                                          bool enable)
{
    if (virtio_is_big_endian(vdev)) {
        return qemu_set_vnet_be(peer, enable);
    } else {
        return qemu_set_vnet_le(peer, enable);
    }
}

static bool virtio_net_set_vnet_endian(VirtIODevice *vdev, NetClientState *ncs,
                                       int queues, bool enable)
{
    int i;

    for (i = 0; i < queues; i++) {
        if (virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, enable) < 0 &&
            enable) {
            while (--i >= 0) {
                virtio_net_set_vnet_endian_one(vdev, ncs[i].peer, false);
            }

            return true;
        }
    }

    return false;
}

static void virtio_net_vnet_endian_status(VirtIONet *n, uint8_t status)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (virtio_net_started(n, status)) {
        /* Before using the device, we tell the network backend about the
         * endianness to use when parsing vnet headers. If the backend
         * can't do it, we fallback onto fixing the headers in the core
         * virtio-net code.
         */
        n->needs_vnet_hdr_swap = virtio_net_set_vnet_endian(vdev, n->nic->ncs,
                                                            queues, true);
    } else if (virtio_net_started(n, vdev->status)) {
        /* After using the device, we need to reset the network backend to
         * the default (guest native endianness), otherwise the guest may
         * lose network connectivity if it is rebooted into a different
         * endianness.
         */
        virtio_net_set_vnet_endian(vdev, n->nic->ncs, queues, false);
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vnet_endian_status(n, status);
    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *ncs = qemu_get_subqueue(n->nic, i);
        bool queue_started;
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }
        queue_started =
            virtio_net_started(n, queue_status) && !n->vhost_started;

        if (queue_started) {
            qemu_flush_queued_packets(ncs);
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (queue_started) {
            if (q->tx_timer) {
                timer_mod(q->tx_timer,
                          qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                timer_del(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(vdev);

    virtio_net_set_status(vdev, vdev->status);
}

static void rxfilter_notify(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    if (nc->rxfilter_notify_enabled) {
        gchar *path = object_get_canonical_path(OBJECT(n->qdev));
        qapi_event_send_nic_rx_filter_changed(!!n->netclient_name,
                                              n->netclient_name, path,
                                              &error_abort);
        g_free(path);

        /* disable event notification to avoid events flooding */
        nc->rxfilter_notify_enabled = 0;
    }
}

static intList *get_vlan_table(VirtIONet *n)
{
    intList *list, *entry;
    int i, j;

    list = NULL;
    for (i = 0; i < MAX_VLAN >> 5; i++) {
        for (j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                entry = g_malloc0(sizeof(*entry));
                entry->value = (i << 5) + j;
                entry->next = list;
                list = entry;
            }
        }
    }

    return list;
}
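
/* n->vlans is a MAX_VLAN-bit bitmap stored as 32-bit words: bit (vid & 0x1f)
 * of word (vid >> 5) marks VLAN 'vid' as allowed, so (i << 5) + j above
 * reconstructs the VLAN id from its word/bit position. */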

static RxFilterInfo *virtio_net_query_rxfilter(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    RxFilterInfo *info;
    strList *str_list, *entry;
    int i;

    info = g_malloc0(sizeof(*info));
    info->name = g_strdup(nc->name);
    info->promiscuous = n->promisc;

    if (n->nouni) {
        info->unicast = RX_STATE_NONE;
    } else if (n->alluni) {
        info->unicast = RX_STATE_ALL;
    } else {
        info->unicast = RX_STATE_NORMAL;
    }

    if (n->nomulti) {
        info->multicast = RX_STATE_NONE;
    } else if (n->allmulti) {
        info->multicast = RX_STATE_ALL;
    } else {
        info->multicast = RX_STATE_NORMAL;
    }

    info->broadcast_allowed = n->nobcast;
    info->multicast_overflow = n->mac_table.multi_overflow;
    info->unicast_overflow = n->mac_table.uni_overflow;

    info->main_mac = qemu_mac_strdup_printf(n->mac);

    str_list = NULL;
    for (i = 0; i < n->mac_table.first_multi; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->unicast_table = str_list;

    str_list = NULL;
    for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
        entry = g_malloc0(sizeof(*entry));
        entry->value = qemu_mac_strdup_printf(n->mac_table.macs + i * ETH_ALEN);
        entry->next = str_list;
        str_list = entry;
    }
    info->multicast_table = str_list;
    info->vlan_table = get_vlan_table(n);

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VLAN)) {
        info->vlan = RX_STATE_ALL;
    } else if (!info->vlan_table) {
        info->vlan = RX_STATE_NONE;
    } else {
        info->vlan = RX_STATE_NORMAL;
    }

    /* enable event notification after query */
    nc->rxfilter_notify_enabled = 1;

    return info;
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = VIRTIO_NET(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;
    timer_del(n->announce_timer);
    n->announce_counter = 0;
    n->status &= ~VIRTIO_NET_S_ANNOUNCE;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    n->has_vnet_hdr = qemu_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = qemu_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs,
                                       int version_1)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    if (version_1) {
        n->guest_hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
    } else {
        n->guest_hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);
    }

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            qemu_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            qemu_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}
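
/* guest_hdr_len is what the driver is promised: the mergeable header (which
 * adds the num_buffers field) whenever VIRTIO_NET_F_MRG_RXBUF or virtio 1.0
 * is negotiated.  host_hdr_len tracks how much of that header the backend
 * itself produces or consumes; when the two differ, the device code inserts
 * or strips the difference on every packet. */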

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 1);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) {
        vhost_set_vring_enable(nc->peer, 0);
    }

    if (nc->peer->info->type != NET_CLIENT_DRIVER_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;
    int r;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            r = peer_attach(n, i);
            assert(!r);
        } else {
            r = peer_detach(n, i);
            assert(!r);
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue);

static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    /* Firstly sync all virtio-net possible supported features */
    features |= n->host_features;

    virtio_add_feature(&features, VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN);

        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6);
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO);
        virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO);
    }

    if (!get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(get_vhost_net(nc->peer), features);
}

static uint64_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint64_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    virtio_add_feature(&features, VIRTIO_NET_F_MAC);
    virtio_add_feature(&features, VIRTIO_NET_F_CSUM);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO4);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_TSO6);
    virtio_add_feature(&features, VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_apply_guest_offloads(VirtIONet *n)
{
    qemu_set_offload(qemu_get_queue(n->nic)->peer,
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)),
            !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)));
}

static uint64_t virtio_net_guest_offloads_by_features(uint32_t features)
{
    static const uint64_t guest_offloads_mask =
        (1ULL << VIRTIO_NET_F_GUEST_CSUM) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
        (1ULL << VIRTIO_NET_F_GUEST_TSO6) |
        (1ULL << VIRTIO_NET_F_GUEST_ECN)  |
        (1ULL << VIRTIO_NET_F_GUEST_UFO);

    return guest_offloads_mask & features;
}

static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    return virtio_net_guest_offloads_by_features(vdev->guest_features);
}

static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    virtio_net_set_multiqueue(n,
                              virtio_has_feature(features, VIRTIO_NET_F_MQ));

    virtio_net_set_mrg_rx_bufs(n,
                               virtio_has_feature(features,
                                                  VIRTIO_NET_F_MRG_RXBUF),
                               virtio_has_feature(features,
                                                  VIRTIO_F_VERSION_1));

    if (n->has_vnet_hdr) {
        n->curr_guest_offloads =
            virtio_net_guest_offloads_by_features(features);
        virtio_net_apply_guest_offloads(n);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(get_vhost_net(nc->peer), features);
    }

    if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) {
        memset(n->vlans, 0, MAX_VLAN >> 3);
    } else {
        memset(n->vlans, 0xff, MAX_VLAN >> 3);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_offloads(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint64_t offloads;
    size_t s;

    if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return VIRTIO_NET_ERR;
    }

    s = iov_to_buf(iov, iov_cnt, 0, &offloads, sizeof(offloads));
    if (s != sizeof(offloads)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET) {
        uint64_t supported_offloads;

        if (!n->has_vnet_hdr) {
            return VIRTIO_NET_ERR;
        }

        supported_offloads = virtio_net_supported_guest_offloads(n);
        if (offloads & ~supported_offloads) {
            return VIRTIO_NET_ERR;
        }

        n->curr_guest_offloads = offloads;
        virtio_net_apply_guest_offloads(n);

        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mac mac_data;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        rxfilter_notify(nc);

        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    int in_use = 0;
    int first_multi = 0;
    uint8_t uni_overflow = 0;
    uint8_t multi_overflow = 0;
    uint8_t *macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    first_multi = in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = virtio_ldl_p(vdev, &mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        goto error;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        goto error;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES - in_use) {
        s = iov_to_buf(iov, iov_cnt, 0, &macs[in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            goto error;
        }
        in_use += mac_data.entries;
    } else {
        multi_overflow = 1;
    }

    n->mac_table.in_use = in_use;
    n->mac_table.first_multi = first_multi;
    n->mac_table.uni_overflow = uni_overflow;
    n->mac_table.multi_overflow = multi_overflow;
    memcpy(n->mac_table.macs, macs, MAC_TABLE_ENTRIES * ETH_ALEN);
    g_free(macs);
    rxfilter_notify(nc);

    return VIRTIO_NET_OK;

error:
    g_free(macs);
    return VIRTIO_NET_ERR;
}

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    uint16_t vid;
    size_t s;
    NetClientState *nc = qemu_get_queue(n->nic);

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = virtio_lduw_p(vdev, &vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    rxfilter_notify(nc);

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_announce(VirtIONet *n, uint8_t cmd,
                                      struct iovec *iov, unsigned int iov_cnt)
{
    if (cmd == VIRTIO_NET_CTRL_ANNOUNCE_ACK &&
        n->status & VIRTIO_NET_S_ANNOUNCE) {
        n->status &= ~VIRTIO_NET_S_ANNOUNCE;
        if (n->announce_counter) {
            timer_mod(n->announce_timer,
                      qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
                      self_announce_delay(n->announce_counter));
        }
        return VIRTIO_NET_OK;
    } else {
        return VIRTIO_NET_ERR;
    }
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                struct iovec *iov, unsigned int iov_cnt)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct virtio_net_ctrl_mq mq;
    size_t s;
    uint16_t queues;

    s = iov_to_buf(iov, iov_cnt, 0, &mq, sizeof(mq));
    if (s != sizeof(mq)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    queues = virtio_lduw_p(vdev, &mq.virtqueue_pairs);

    if (queues < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        queues > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        queues > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = queues;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(vdev, vdev->status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement *elem;
    size_t s;
    struct iovec *iov, *iov2;
    unsigned int iov_cnt;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }
        if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) {
            virtio_error(vdev, "virtio-net ctrl missing headers");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        iov_cnt = elem->out_num;
        iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num);
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
            status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
            status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
        }

        s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, elem, sizeof(status));
        virtio_notify(vdev, vq);
        g_free(iov2);
        g_free(elem);
    }
}
*vdev
, VirtQueue
*vq
)
941 VirtIONet
*n
= VIRTIO_NET(vdev
);
942 int queue_index
= vq2q(virtio_get_queue_index(vq
));
944 qemu_flush_queued_packets(qemu_get_subqueue(n
->nic
, queue_index
));
static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!vdev->vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

static void virtio_net_hdr_swap(VirtIODevice *vdev, struct virtio_net_hdr *hdr)
{
    virtio_tswap16s(vdev, &hdr->hdr_len);
    virtio_tswap16s(vdev, &hdr->gso_size);
    virtio_tswap16s(vdev, &hdr->csum_start);
    virtio_tswap16s(vdev, &hdr->csum_offset);
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}
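
/* The magic offsets above assume an untagged Ethernet frame with a 20-byte
 * IPv4 header: bytes 12-13 hold the ethertype, byte 23 (14 + 9) the IP
 * protocol, and bytes 34-35 (14 + 20) the UDP source port (67 = bootps,
 * i.e. a DHCP server reply). */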

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);

        if (n->needs_vnet_hdr_swap) {
            virtio_net_hdr_swap(VIRTIO_DEVICE(n), wbuf);
        }
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = lduw_be_p(ptr + 14) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}
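
/* The receive path below copies one packet into as many descriptor chains
 * as needed.  With mergeable RX buffers, the first chain's num_buffers
 * field is patched afterwards (via the iovec saved in mhdr_sg) to record
 * how many chains were consumed; without them, the packet must fit in a
 * single chain or it is dropped. */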

static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement *elem;
        int len, total;
        const struct iovec *sg;

        total = 0;

        elem = virtqueue_pop(q->rx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            if (i) {
                virtio_error(vdev, "virtio-net unexpected empty queue: "
                             "i %zd mergeable %d offset %zd, size %zd, "
                             "guest hdr len %zd, host hdr len %zd "
                             "guest features 0x%" PRIx64,
                             i, n->mergeable_rx_bufs, offset, size,
                             n->guest_hdr_len, n->host_hdr_len,
                             vdev->guest_features);
            }
            return -1;
        }

        if (elem->in_num < 1) {
            virtio_error(vdev,
                         "virtio-net receive queue contains no in buffers");
            virtqueue_detach_element(q->rx_vq, elem, 0);
            g_free(elem);
            return -1;
        }

        sg = elem->in_sg;
        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem->in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem->in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem->in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
            virtqueue_discard(q->rx_vq, elem, total);
            g_free(elem);
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, elem, total, i++);
        g_free(elem);
    }

    if (mhdr_cnt) {
        virtio_stw_p(vdev, &mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(vdev, q->rx_vq);

    return size;
}

static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    virtqueue_push(q->tx_vq, q->async_tx.elem, 0);
    virtio_notify(vdev, q->tx_vq);

    g_free(q->async_tx.elem);
    q->async_tx.elem = NULL;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

/* TX */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtQueueElement *elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    if (q->async_tx.elem) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    for (;;) {
        ssize_t ret;
        unsigned int out_num;
        struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg;
        struct virtio_net_hdr_mrg_rxbuf mhdr;

        elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement));
        if (!elem) {
            break;
        }

        out_num = elem->out_num;
        out_sg = elem->out_sg;
        if (out_num < 1) {
            virtio_error(vdev, "virtio-net header not in first element");
            virtqueue_detach_element(q->tx_vq, elem, 0);
            g_free(elem);
            return -EINVAL;
        }

        if (n->has_vnet_hdr) {
            if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) <
                n->guest_hdr_len) {
                virtio_error(vdev, "virtio-net header incorrect");
                virtqueue_detach_element(q->tx_vq, elem, 0);
                g_free(elem);
                return -EINVAL;
            }
            if (n->needs_vnet_hdr_swap) {
                virtio_net_hdr_swap(vdev, (void *) &mhdr);
                sg2[0].iov_base = &mhdr;
                sg2[0].iov_len = n->guest_hdr_len;
                out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1,
                                   out_sg, out_num,
                                   n->guest_hdr_len, -1);
                if (out_num == VIRTQUEUE_MAX_SIZE) {
                    goto drop;
                }
                out_num += 1;
                out_sg = sg2;
            }
        }
        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            return -EBUSY;
        }

drop:
        virtqueue_push(q->tx_vq, elem, 0);
        virtio_notify(vdev, q->tx_vq);
        g_free(elem);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}
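
/* virtio_net_flush_tx() returns the number of packets sent, stopping after
 * n->tx_burst; -EBUSY means a send is completing asynchronously and
 * virtio_net_tx_complete() will resume the flush, while -EINVAL means the
 * device is broken.  The timer and bottom-half handlers below rely on
 * these return values. */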

static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        timer_del(q->tx_timer);
        q->tx_waiting = 0;
        if (virtio_net_flush_tx(q) == -EINVAL) {
            return;
        }
    } else {
        timer_mod(q->tx_timer,
                  qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!vdev->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int32_t ret;

    /* This happens when device was stopped but BH wasn't. */
    if (!vdev->vm_running) {
        /* Make sure tx waiting is set, so we'll run when restarted. */
        assert(q->tx_waiting);
        return;
    }

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK))) {
        return;
    }

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY || ret == -EINVAL) {
        return; /* Notification re-enable handled by tx_complete or device
                 * broken */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    ret = virtio_net_flush_tx(q);
    if (ret == -EINVAL) {
        return;
    } else if (ret > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}

static void virtio_net_add_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);

    n->vqs[index].rx_vq = virtio_add_queue(vdev, n->net_conf.rx_queue_size,
                                           virtio_net_handle_rx);
    if (n->net_conf.tx && !strcmp(n->net_conf.tx, "timer")) {
        n->vqs[index].tx_vq =
            virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
        n->vqs[index].tx_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
                                              virtio_net_tx_timer,
                                              &n->vqs[index]);
    } else {
        n->vqs[index].tx_vq =
            virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
        n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]);
    }

    n->vqs[index].tx_waiting = 0;
    n->vqs[index].n = n;
}

static void virtio_net_del_queue(VirtIONet *n, int index)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    VirtIONetQueue *q = &n->vqs[index];
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    qemu_purge_queued_packets(nc);

    virtio_del_queue(vdev, index * 2);
    if (q->tx_timer) {
        timer_del(q->tx_timer);
        timer_free(q->tx_timer);
    } else {
        qemu_bh_delete(q->tx_bh);
    }
    virtio_del_queue(vdev, index * 2 + 1);
}

static void virtio_net_change_num_queues(VirtIONet *n, int new_max_queues)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(n);
    int old_num_queues = virtio_get_num_queues(vdev);
    int new_num_queues = new_max_queues * 2 + 1;
    int i;

    assert(old_num_queues >= 3);
    assert(old_num_queues % 2 == 1);

    if (old_num_queues == new_num_queues) {
        return;
    }

    /*
     * We always need to remove and add ctrl vq if
     * old_num_queues != new_num_queues. Remove ctrl_vq first,
     * and then we only enter one of the following two loops.
     */
    virtio_del_queue(vdev, old_num_queues - 1);

    for (i = new_num_queues - 1; i < old_num_queues - 1; i += 2) {
        /* new_num_queues < old_num_queues */
        virtio_net_del_queue(n, i / 2);
    }

    for (i = old_num_queues - 1; i < new_num_queues - 1; i += 2) {
        /* new_num_queues > old_num_queues */
        virtio_net_add_queue(n, i / 2);
    }

    /* add ctrl_vq last */
    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue)
{
    int max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;
    virtio_net_change_num_queues(n, max);

    virtio_net_set_queues(n);
}

static void virtio_net_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i;

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        qemu_put_be64(f, n->curr_guest_offloads);
    }
}

static int virtio_net_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    int i, link_down;

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f),
                               virtio_vdev_has_feature(vdev,
                                                       VIRTIO_F_VERSION_1));

    n->status = qemu_get_be16(f);

    n->promisc = qemu_get_byte(f);
    n->allmulti = qemu_get_byte(f);

    n->mac_table.in_use = qemu_get_be32(f);
    /* MAC_TABLE_ENTRIES may be different from the saved image */
    if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
        qemu_get_buffer(f, n->mac_table.macs,
                        n->mac_table.in_use * ETH_ALEN);
    } else {
        int64_t i;

        /* Overflow detected - can happen if source has a larger MAC table.
         * We simply set overflow flag so there's no need to maintain the
         * table of addresses, discard them all.
         * Note: 64 bit math to avoid integer overflow.
         */
        for (i = 0; i < (int64_t)n->mac_table.in_use * ETH_ALEN; ++i) {
            qemu_get_byte(f);
        }
        n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
        n->mac_table.in_use = 0;
    }

    qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
        error_report("virtio-net: saved image requires vnet_hdr=on");
        return -1;
    }

    n->mac_table.multi_overflow = qemu_get_byte(f);
    n->mac_table.uni_overflow = qemu_get_byte(f);

    n->alluni = qemu_get_byte(f);
    n->nomulti = qemu_get_byte(f);
    n->nouni = qemu_get_byte(f);
    n->nobcast = qemu_get_byte(f);

    if (qemu_get_byte(f) && !peer_has_ufo(n)) {
        error_report("virtio-net: saved image requires TUN_F_UFO support");
        return -1;
    }

    if (n->max_queues > 1) {
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues ");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        if (n->curr_queues > n->max_queues) {
            error_report("virtio-net: curr_queues %x > max_queues %x",
                         n->curr_queues, n->max_queues);
            return -1;
        }
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        n->curr_guest_offloads = qemu_get_be64(f);
    } else {
        n->curr_guest_offloads = virtio_net_supported_guest_offloads(n);
    }

    if (peer_has_vnet_hdr(n)) {
        virtio_net_apply_guest_offloads(n);
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    if (virtio_vdev_has_feature(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE) &&
        virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) {
        n->announce_counter = SELF_ANNOUNCE_ROUNDS;
        timer_mod(n->announce_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL));
    }

    return 0;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .link_status_changed = virtio_net_set_link_status,
    .query_rx_filter = virtio_net_query_rxfilter,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = VIRTIO_NET(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features)
{
    int i, config_size = 0;
    virtio_add_feature(&host_features, VIRTIO_NET_F_MAC);
    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }
    n->config_size = config_size;
}
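
/* Example: with only VIRTIO_NET_F_MAC and VIRTIO_NET_F_STATUS enabled, the
 * config space ends at endof(struct virtio_net_config, status); enabling
 * VIRTIO_NET_F_MQ grows it to cover max_virtqueue_pairs as well. */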

void virtio_net_set_netclient_name(VirtIONet *n, const char *name,
                                   const char *type)
{
    /*
     * The name can be NULL, the netclient name will be type.x.
     */
    assert(type != NULL);

    g_free(n->netclient_name);
    g_free(n->netclient_type);
    n->netclient_name = g_strdup(name);
    n->netclient_type = g_strdup(type);
}

static void virtio_net_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    NetClientState *nc;
    int i;

    virtio_net_set_config_size(n, n->host_features);
    virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size);

    /*
     * We set a lower limit on RX queue size to what it always was.
     * Guests that want a smaller ring can always resize it without
     * help from us (using virtio 1 and up).
     */
    if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE ||
        n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE ||
        (n->net_conf.rx_queue_size & (n->net_conf.rx_queue_size - 1))) {
        error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), "
                   "must be a power of 2 between %d and %d.",
                   n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE,
                   VIRTQUEUE_MAX_SIZE);
        virtio_cleanup(vdev);
        return;
    }

    n->max_queues = MAX(n->nic_conf.peers.queues, 1);
    if (n->max_queues * 2 + 1 > VIRTIO_QUEUE_MAX) {
        error_setg(errp, "Invalid number of queues (= %" PRIu32 "), "
                   "must be a positive integer less than %d.",
                   n->max_queues, (VIRTIO_QUEUE_MAX - 1) / 2);
        virtio_cleanup(vdev);
        return;
    }
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->curr_queues = 1;
    n->tx_timeout = n->net_conf.txtimer;

    if (n->net_conf.tx && strcmp(n->net_conf.tx, "timer")
                       && strcmp(n->net_conf.tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     n->net_conf.tx);
        error_report("Defaulting to \"bh\"");
    }

    for (i = 0; i < n->max_queues; i++) {
        virtio_net_add_queue(n, i);
    }

    n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&n->nic_conf.macaddr);
    memcpy(&n->mac[0], &n->nic_conf.macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;
    n->announce_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
                                     virtio_net_announce_timer, n);

    if (n->netclient_type) {
        /*
         * Happens when virtio_net_set_netclient_name has been called.
         */
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              n->netclient_type, n->netclient_name, n);
    } else {
        n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf,
                              object_get_typename(OBJECT(dev)), dev->id, n);
    }

    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            qemu_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), n->nic_conf.macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = n->net_conf.txburst;
    virtio_net_set_mrg_rx_bufs(n, 0, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    nc = qemu_get_queue(n->nic);
    nc->rxfilter_notify_enabled = 1;

    n->qdev = dev;
}

static void virtio_net_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIONet *n = VIRTIO_NET(dev);
    int i, max_queues;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    g_free(n->netclient_name);
    n->netclient_name = NULL;
    g_free(n->netclient_type);
    n->netclient_type = NULL;

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    max_queues = n->multiqueue ? n->max_queues : 1;
    for (i = 0; i < max_queues; i++) {
        virtio_net_del_queue(n, i);
    }

    timer_del(n->announce_timer);
    timer_free(n->announce_timer);
    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(vdev);
}

static void virtio_net_instance_init(Object *obj)
{
    VirtIONet *n = VIRTIO_NET(obj);

    /*
     * The default config_size is sizeof(struct virtio_net_config).
     * Can be overridden with virtio_net_set_config_size.
     */
    n->config_size = sizeof(struct virtio_net_config);
    device_add_bootindex_property(obj, &n->nic_conf.bootindex,
                                  "bootindex", "/ethernet-phy@0",
                                  DEVICE(n), NULL);
}

static void virtio_net_pre_save(void *opaque)
{
    VirtIONet *n = opaque;

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
}

static const VMStateDescription vmstate_virtio_net = {
    .name = "virtio-net",
    .minimum_version_id = VIRTIO_NET_VM_VERSION,
    .version_id = VIRTIO_NET_VM_VERSION,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
    .pre_save = virtio_net_pre_save,
};

static Property virtio_net_properties[] = {
    DEFINE_PROP_BIT("csum", VirtIONet, host_features, VIRTIO_NET_F_CSUM, true),
    DEFINE_PROP_BIT("guest_csum", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_CSUM, true),
    DEFINE_PROP_BIT("gso", VirtIONet, host_features, VIRTIO_NET_F_GSO, true),
    DEFINE_PROP_BIT("guest_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO4, true),
    DEFINE_PROP_BIT("guest_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_TSO6, true),
    DEFINE_PROP_BIT("guest_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ECN, true),
    DEFINE_PROP_BIT("guest_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_UFO, true),
    DEFINE_PROP_BIT("guest_announce", VirtIONet, host_features,
                    VIRTIO_NET_F_GUEST_ANNOUNCE, true),
    DEFINE_PROP_BIT("host_tso4", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO4, true),
    DEFINE_PROP_BIT("host_tso6", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_TSO6, true),
    DEFINE_PROP_BIT("host_ecn", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_ECN, true),
    DEFINE_PROP_BIT("host_ufo", VirtIONet, host_features,
                    VIRTIO_NET_F_HOST_UFO, true),
    DEFINE_PROP_BIT("mrg_rxbuf", VirtIONet, host_features,
                    VIRTIO_NET_F_MRG_RXBUF, true),
    DEFINE_PROP_BIT("status", VirtIONet, host_features,
                    VIRTIO_NET_F_STATUS, true),
    DEFINE_PROP_BIT("ctrl_vq", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VQ, true),
    DEFINE_PROP_BIT("ctrl_rx", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX, true),
    DEFINE_PROP_BIT("ctrl_vlan", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_VLAN, true),
    DEFINE_PROP_BIT("ctrl_rx_extra", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_RX_EXTRA, true),
    DEFINE_PROP_BIT("ctrl_mac_addr", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_MAC_ADDR, true),
    DEFINE_PROP_BIT("ctrl_guest_offloads", VirtIONet, host_features,
                    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, true),
    DEFINE_PROP_BIT("mq", VirtIONet, host_features, VIRTIO_NET_F_MQ, false),
    DEFINE_NIC_PROPERTIES(VirtIONet, nic_conf),
    DEFINE_PROP_UINT32("x-txtimer", VirtIONet, net_conf.txtimer,
                       TX_TIMER_INTERVAL),
    DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST),
    DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx),
    DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size,
                       VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_net_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_net_properties;
    dc->vmsd = &vmstate_virtio_net;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    vdc->realize = virtio_net_device_realize;
    vdc->unrealize = virtio_net_device_unrealize;
    vdc->get_config = virtio_net_get_config;
    vdc->set_config = virtio_net_set_config;
    vdc->get_features = virtio_net_get_features;
    vdc->set_features = virtio_net_set_features;
    vdc->bad_features = virtio_net_bad_features;
    vdc->reset = virtio_net_reset;
    vdc->set_status = virtio_net_set_status;
    vdc->guest_notifier_mask = virtio_net_guest_notifier_mask;
    vdc->guest_notifier_pending = virtio_net_guest_notifier_pending;
    vdc->load = virtio_net_load_device;
    vdc->save = virtio_net_save_device;
}

static const TypeInfo virtio_net_info = {
    .name = TYPE_VIRTIO_NET,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIONet),
    .instance_init = virtio_net_instance_init,
    .class_init = virtio_net_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_net_info);
}

type_init(virtio_register_types)