/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "qemu/iov.h"
#include "virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "virtio-net.h"
#include "vhost_net.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

typedef struct VirtIONetQueue {
    VirtQueue *rx_vq;
    VirtQueue *tx_vq;
    QEMUTimer *tx_timer;
    QEMUBH *tx_bh;
    int tx_waiting;
    struct {
        VirtQueueElement elem;
        ssize_t len;
    } async_tx;
    struct VirtIONet *n;
} VirtIONetQueue;

typedef struct VirtIONet {
    VirtIODevice vdev;
    uint8_t mac[ETH_ALEN];
    uint16_t status;
    VirtIONetQueue vqs[MAX_QUEUE_NUM];
    VirtQueue *ctrl_vq;
    NICState *nic;
    uint32_t tx_timeout;
    int32_t tx_burst;
    uint32_t has_vnet_hdr;
    size_t host_hdr_len;
    size_t guest_hdr_len;
    uint8_t has_ufo;
    int mergeable_rx_bufs;
    uint8_t promisc;
    uint8_t allmulti;
    uint8_t alluni;
    uint8_t nomulti;
    uint8_t nouni;
    uint8_t nobcast;
    uint8_t vhost_started;
    struct {
        int in_use;
        int first_multi;
        uint8_t multi_overflow;
        uint8_t uni_overflow;
        uint8_t *macs;
    } mac_table;
    uint32_t *vlans;
    DeviceState *qdev;
    int multiqueue;
    uint16_t max_queues;
    uint16_t curr_queues;
    size_t config_size;
} VirtIONet;

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))
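
/* For example, endof(struct virtio_net_config, status) is
 * offsetof(struct virtio_net_config, status) plus the two bytes of the
 * 'status' field itself, i.e. the smallest config space that still lets
 * a driver read 'status'. */
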
typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};
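
/* virtio_net_init() below takes config_size as the largest 'end' among
 * the features the device actually offers, so e.g. a device offering
 * only VIRTIO_NET_F_MAC exposes just the 6-byte 'mac' field and nothing
 * past it. */
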
static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}

static int vq2q(int queue_index)
{
    return queue_index / 2;
}
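
/* Virtqueues are laid out in rx/tx pairs: pair N owns vq index 2N (rx)
 * and 2N + 1 (tx), with the control vq, when present, created after the
 * data queues; so vq2q(2) == vq2q(3) == 1. */
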
/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static VirtIONet *to_virtio_net(VirtIODevice *vdev)
{
    return (VirtIONet *)vdev;
}

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_config netcfg;

    stw_p(&netcfg.status, n->status);
    stw_p(&netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!(n->vdev.guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && n->vdev.vm_running;
}

static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!nc->peer) {
        return;
    }
    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    if (!tap_get_vhost_net(nc->peer)) {
        return;
    }

    if (!!n->vhost_started == virtio_net_started(n, status) &&
                              !nc->peer->link_down) {
        return;
    }
    if (!n->vhost_started) {
        int r;
        if (!vhost_net_query(tap_get_vhost_net(nc->peer), &n->vdev)) {
            return;
        }
        n->vhost_started = 1;
        r = vhost_net_start(&n->vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(&n->vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = to_virtio_net(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
            if (q->tx_timer) {
                qemu_mod_timer(q->tx_timer,
                               qemu_get_clock_ns(vm_clock) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                qemu_del_timer(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(&n->vdev);

    virtio_net_set_status(&n->vdev, n->vdev.status);
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = to_virtio_net(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    if (!nc->peer) {
        return;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    n->has_vnet_hdr = tap_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = tap_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            tap_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            tap_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}
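
/* Note: struct virtio_net_hdr is 10 bytes; virtio_net_hdr_mrg_rxbuf
 * appends a 16-bit num_buffers field for 12 bytes total.  When the tap
 * peer accepts the larger header length, host and guest header lengths
 * match, so virtio_net_flush_tx() below can pass the guest header
 * through without the iov_copy reshuffle. */
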
static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            assert(!peer_attach(n, i));
        } else {
            assert(!peer_detach(n, i));
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue, int ctrl);

static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = to_virtio_net(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return features;
    }
    if (!tap_get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(tap_get_vhost_net(nc->peer), features);
}

static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = to_virtio_net(vdev);
    int i;

    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)),
                              !!(features & (1 << VIRTIO_NET_F_CTRL_VQ)));

    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));

    if (n->has_vnet_hdr) {
        tap_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
                        (features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                        (features >> VIRTIO_NET_F_GUEST_ECN) & 1,
                        (features >> VIRTIO_NET_F_GUEST_UFO) & 1);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
            continue;
        }
        if (!tap_get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(tap_get_vhost_net(nc->peer), features);
    }
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    struct virtio_net_ctrl_mac mac_data;
    size_t s;

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.uni_overflow = 0;
    n->mac_table.multi_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        return VIRTIO_NET_ERR;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        return VIRTIO_NET_ERR;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, n->mac_table.macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            return VIRTIO_NET_ERR;
        }
        n->mac_table.in_use += mac_data.entries;
    } else {
        n->mac_table.uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    n->mac_table.first_multi = n->mac_table.in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        return VIRTIO_NET_ERR;
    }

    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        return VIRTIO_NET_ERR;
    }

    if (n->mac_table.in_use + mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0,
                       &n->mac_table.macs[n->mac_table.in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            return VIRTIO_NET_ERR;
        }
        n->mac_table.in_use += mac_data.entries;
    } else {
        n->mac_table.multi_overflow = 1;
    }

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    uint16_t vid;
    size_t s;

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = lduw_p(&vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    return VIRTIO_NET_OK;
}
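
/* n->vlans is a 4096-bit bitmap (MAX_VLAN >> 3 == 512 bytes): vid >> 5
 * selects the 32-bit word and vid & 0x1f the bit within it.  E.g. vid
 * 100 lives in word 3, bit 4. */
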
static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                VirtQueueElement *elem)
{
    struct virtio_net_ctrl_mq s;

    if (elem->out_num != 2 ||
        elem->out_sg[1].iov_len != sizeof(struct virtio_net_ctrl_mq)) {
        error_report("virtio-net ctrl invalid steering command");
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    memcpy(&s, elem->out_sg[1].iov_base, sizeof(struct virtio_net_ctrl_mq));

    if (s.virtqueue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        s.virtqueue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        s.virtqueue_pairs > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = s.virtqueue_pairs;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(&n->vdev, n->vdev.status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}
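
/* On the wire the guest queues a two-descriptor chain for this command:
 * a virtio_net_ctrl_hdr with class VIRTIO_NET_CTRL_MQ and cmd
 * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, followed by struct virtio_net_ctrl_mq,
 * which holds a single uint16_t virtqueue_pairs; hence the out_num == 2
 * check above. */
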
static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov = elem.out_sg;
        iov_cnt = elem.out_num;
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, &elem);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}

/* RX */

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!n->vdev.vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;
    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}
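
/* The offsets above assume an untagged Ethernet frame carrying IPv4
 * with no IP options: bytes 12-13 hold the ethertype, byte 23 is the IP
 * protocol field (14-byte Ethernet header + offset 9), and bytes 34-35
 * are the UDP source port (14 + 20-byte IP header), i.e. 67 for a DHCP
 * server's reply. */
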
static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

static ssize_t
virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len, n->vdev.guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;
        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
#if 0
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
#endif
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        stw_p(&mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(&n->vdev, q->rx_vq);

    return size;
}
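
/* With mergeable rx buffers the device does not know num_buffers until
 * the whole packet has been placed, so the loop above records where the
 * header landed (mhdr_sg/mhdr_cnt, captured on the first descriptor
 * chain) and patches the 16-bit num_buffers field in place once it has
 * counted how many chains the packet consumed. */
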
static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(&n->vdev, q->tx_vq);

    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

/* TX */
static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    assert(n->vdev.vm_running);

    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len  = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(&n->vdev, q->tx_vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}

static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!n->vdev.vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        qemu_del_timer(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        qemu_mod_timer(q->tx_timer,
                       qemu_get_clock_ns(vm_clock) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!n->vdev.vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    assert(n->vdev.vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    int32_t ret;

    assert(n->vdev.vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)))
        return;

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}
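
/* Example: a guest streaming at full rate keeps flush_tx() returning
 * tx_burst, so the bottom half keeps rescheduling itself with guest
 * notifications disabled; once a flush drains less than a burst,
 * notifications are re-enabled and a final flush catches any packets
 * that raced in before the re-enable. */
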
static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue, int ctrl)
{
    VirtIODevice *vdev = &n->vdev;
    int i, max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;

    for (i = 2; i <= n->max_queues * 2 + 1; i++) {
        virtio_del_queue(vdev, i);
    }

    for (i = 1; i < max; i++) {
        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
        if (n->vqs[i].tx_timer) {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
            n->vqs[i].tx_timer = qemu_new_timer_ns(vm_clock,
                                                   virtio_net_tx_timer,
                                                   &n->vqs[i]);
        } else {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
        }

        n->vqs[i].tx_waiting = 0;
        n->vqs[i].n = n;
    }

    if (ctrl) {
        n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    }

    virtio_net_set_queues(n);
}

static void virtio_net_save(QEMUFile *f, void *opaque)
{
    int i;
    VirtIONet *n = opaque;

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(&n->vdev, f);

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }
}

*f
, void *opaque
, int version_id
)
1156 VirtIONet
*n
= opaque
;
1157 int ret
, i
, link_down
;
1159 if (version_id
< 2 || version_id
> VIRTIO_NET_VM_VERSION
)
1162 ret
= virtio_load(&n
->vdev
, f
);
1167 qemu_get_buffer(f
, n
->mac
, ETH_ALEN
);
1168 n
->vqs
[0].tx_waiting
= qemu_get_be32(f
);
1170 virtio_net_set_mrg_rx_bufs(n
, qemu_get_be32(f
));
1172 if (version_id
>= 3)
1173 n
->status
= qemu_get_be16(f
);
1175 if (version_id
>= 4) {
1176 if (version_id
< 8) {
1177 n
->promisc
= qemu_get_be32(f
);
1178 n
->allmulti
= qemu_get_be32(f
);
1180 n
->promisc
= qemu_get_byte(f
);
1181 n
->allmulti
= qemu_get_byte(f
);
1185 if (version_id
>= 5) {
1186 n
->mac_table
.in_use
= qemu_get_be32(f
);
1187 /* MAC_TABLE_ENTRIES may be different from the saved image */
1188 if (n
->mac_table
.in_use
<= MAC_TABLE_ENTRIES
) {
1189 qemu_get_buffer(f
, n
->mac_table
.macs
,
1190 n
->mac_table
.in_use
* ETH_ALEN
);
1191 } else if (n
->mac_table
.in_use
) {
1192 uint8_t *buf
= g_malloc0(n
->mac_table
.in_use
);
1193 qemu_get_buffer(f
, buf
, n
->mac_table
.in_use
* ETH_ALEN
);
1195 n
->mac_table
.multi_overflow
= n
->mac_table
.uni_overflow
= 1;
1196 n
->mac_table
.in_use
= 0;
1200 if (version_id
>= 6)
1201 qemu_get_buffer(f
, (uint8_t *)n
->vlans
, MAX_VLAN
>> 3);
1203 if (version_id
>= 7) {
1204 if (qemu_get_be32(f
) && !peer_has_vnet_hdr(n
)) {
1205 error_report("virtio-net: saved image requires vnet_hdr=on");
1209 if (n
->has_vnet_hdr
) {
1210 tap_set_offload(qemu_get_queue(n
->nic
)->peer
,
1211 (n
->vdev
.guest_features
>> VIRTIO_NET_F_GUEST_CSUM
) & 1,
1212 (n
->vdev
.guest_features
>> VIRTIO_NET_F_GUEST_TSO4
) & 1,
1213 (n
->vdev
.guest_features
>> VIRTIO_NET_F_GUEST_TSO6
) & 1,
1214 (n
->vdev
.guest_features
>> VIRTIO_NET_F_GUEST_ECN
) & 1,
1215 (n
->vdev
.guest_features
>> VIRTIO_NET_F_GUEST_UFO
) & 1);
1219 if (version_id
>= 9) {
1220 n
->mac_table
.multi_overflow
= qemu_get_byte(f
);
1221 n
->mac_table
.uni_overflow
= qemu_get_byte(f
);
1224 if (version_id
>= 10) {
1225 n
->alluni
= qemu_get_byte(f
);
1226 n
->nomulti
= qemu_get_byte(f
);
1227 n
->nouni
= qemu_get_byte(f
);
1228 n
->nobcast
= qemu_get_byte(f
);
1231 if (version_id
>= 11) {
1232 if (qemu_get_byte(f
) && !peer_has_ufo(n
)) {
1233 error_report("virtio-net: saved image requires TUN_F_UFO support");
1238 if (n
->max_queues
> 1) {
1239 if (n
->max_queues
!= qemu_get_be16(f
)) {
1240 error_report("virtio-net: different max_queues ");
1244 n
->curr_queues
= qemu_get_be16(f
);
1245 for (i
= 1; i
< n
->curr_queues
; i
++) {
1246 n
->vqs
[i
].tx_waiting
= qemu_get_be32(f
);
1250 virtio_net_set_queues(n
);
1252 /* Find the first multicast entry in the saved MAC filter */
1253 for (i
= 0; i
< n
->mac_table
.in_use
; i
++) {
1254 if (n
->mac_table
.macs
[i
* ETH_ALEN
] & 1) {
1258 n
->mac_table
.first_multi
= i
;
1260 /* nc.link_down can't be migrated, so infer link_down according
1261 * to link status bit in n->status */
1262 link_down
= (n
->status
& VIRTIO_NET_S_LINK_UP
) == 0;
1263 for (i
= 0; i
< n
->max_queues
; i
++) {
1264 qemu_get_subqueue(n
->nic
, i
)->link_down
= link_down
;
static void virtio_net_cleanup(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    n->nic = NULL;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = to_virtio_net(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(tap_get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = to_virtio_net(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(tap_get_vhost_net(nc->peer),
                             vdev, idx, mask);
}

VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
                              virtio_net_conf *net, uint32_t host_features)
{
    VirtIONet *n;
    int i, config_size = 0;

    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }

    n = (VirtIONet *)virtio_common_init("virtio-net", VIRTIO_ID_NET,
                                        config_size, sizeof(VirtIONet));

    n->config_size = config_size;
    n->vdev.get_config = virtio_net_get_config;
    n->vdev.set_config = virtio_net_set_config;
    n->vdev.get_features = virtio_net_get_features;
    n->vdev.set_features = virtio_net_set_features;
    n->vdev.bad_features = virtio_net_bad_features;
    n->vdev.reset = virtio_net_reset;
    n->vdev.set_status = virtio_net_set_status;
    n->vdev.guest_notifier_mask = virtio_net_guest_notifier_mask;
    n->vdev.guest_notifier_pending = virtio_net_guest_notifier_pending;
    n->vqs[0].rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
    n->max_queues = conf->queues;
    n->curr_queues = 1;
    n->vqs[0].n = n;
    n->tx_timeout = net->txtimer;

    if (net->tx && strcmp(net->tx, "timer") && strcmp(net->tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     net->tx);
        error_report("Defaulting to \"bh\"");
    }

    if (net->tx && !strcmp(net->tx, "timer")) {
        n->vqs[0].tx_vq = virtio_add_queue(&n->vdev, 256,
                                           virtio_net_handle_tx_timer);
        n->vqs[0].tx_timer = qemu_new_timer_ns(vm_clock, virtio_net_tx_timer,
                                               &n->vqs[0]);
    } else {
        n->vqs[0].tx_vq = virtio_add_queue(&n->vdev, 256,
                                           virtio_net_handle_tx_bh);
        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
    }
    n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&conf->macaddr);
    memcpy(&n->mac[0], &conf->macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;

    n->nic = qemu_new_nic(&net_virtio_info, conf,
                          object_get_typename(OBJECT(dev)), dev->id, n);
    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            tap_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), conf->macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = net->txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);

    add_boot_device_path(conf->bootindex, dev, "/ethernet-phy@0");

    return &n->vdev;
}

void virtio_net_exit(VirtIODevice *vdev)
{
    VirtIONet *n = DO_UPCAST(VirtIONet, vdev, vdev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(n->qdev, "virtio-net", n);

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        if (q->tx_timer) {
            qemu_del_timer(q->tx_timer);
            qemu_free_timer(q->tx_timer);
        } else {
            qemu_bh_delete(q->tx_bh);
        }
    }

    qemu_del_nic(n->nic);
    virtio_cleanup(&n->vdev);
}