/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include "qemu/iov.h"
#include "hw/virtio.h"
#include "net/net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "hw/virtio-net.h"
#include "hw/vhost_net.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

/*
 * Calculate the number of bytes up to and including the given 'field' of
 * 'container'.
 */
#define endof(container, field) \
    (offsetof(container, field) + sizeof(((container *)0)->field))

typedef struct VirtIOFeature {
    uint32_t flags;
    size_t end;
} VirtIOFeature;

static VirtIOFeature feature_sizes[] = {
    {.flags = 1 << VIRTIO_NET_F_MAC,
     .end = endof(struct virtio_net_config, mac)},
    {.flags = 1 << VIRTIO_NET_F_STATUS,
     .end = endof(struct virtio_net_config, status)},
    {.flags = 1 << VIRTIO_NET_F_MQ,
     .end = endof(struct virtio_net_config, max_virtqueue_pairs)},
    {}
};
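
/*
 * virtio_net_init() walks this zero-terminated table and sizes the config
 * space as the largest .end whose feature bit is offered to the guest, so
 * the guest-visible config space ends right after the last usable field.
 */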

static VirtIONetQueue *virtio_net_get_subqueue(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    return &n->vqs[nc->queue_index];
}
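
/*
 * Virtqueues are created in rx/tx pairs: index 2n is the rx queue and
 * 2n + 1 the tx queue of pair n (see virtio_net_set_multiqueue()), so
 * dividing a virtqueue index by two recovers its queue-pair index.
 */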

static int vq2q(int queue_index)
{
    return queue_index / 2;
}

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static VirtIONet *to_virtio_net(VirtIODevice *vdev)
{
    return (VirtIONet *)vdev;
}

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_config netcfg;

    stw_p(&netcfg.status, n->status);
    stw_p(&netcfg.max_virtqueue_pairs, n->max_queues);
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, n->config_size);
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_config netcfg = {};

    memcpy(&netcfg, config, n->config_size);

    if (!(n->vdev.guest_features >> VIRTIO_NET_F_CTRL_MAC_ADDR & 1) &&
        memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && n->vdev.vm_running;
}
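
/*
 * Bring the vhost backend in line with the device state: start it when the
 * driver is ready and the link is up, stop it otherwise. A failed start
 * falls back to userspace virtio processing.
 */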

static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    NetClientState *nc = qemu_get_queue(n->nic);
    int queues = n->multiqueue ? n->max_queues : 1;

    if (!nc->peer) {
        return;
    }
    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    if (!tap_get_vhost_net(nc->peer)) {
        return;
    }

    if (!!n->vhost_started == virtio_net_started(n, status) &&
        !nc->peer->link_down) {
        return;
    }

    if (!n->vhost_started) {
        int r;
        if (!vhost_net_query(tap_get_vhost_net(nc->peer), &n->vdev)) {
            return;
        }
        n->vhost_started = 1;
        r = vhost_net_start(&n->vdev, n->nic->ncs, queues);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
            n->vhost_started = 0;
        }
    } else {
        vhost_net_stop(&n->vdev, n->nic->ncs, queues);
        n->vhost_started = 0;
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = to_virtio_net(vdev);
    VirtIONetQueue *q;
    int i;
    uint8_t queue_status;

    virtio_net_vhost_status(n, status);

    for (i = 0; i < n->max_queues; i++) {
        q = &n->vqs[i];

        if ((!n->multiqueue && i != 0) || i >= n->curr_queues) {
            queue_status = 0;
        } else {
            queue_status = status;
        }

        if (!q->tx_waiting) {
            continue;
        }

        if (virtio_net_started(n, queue_status) && !n->vhost_started) {
            if (q->tx_timer) {
                qemu_mod_timer(q->tx_timer,
                               qemu_get_clock_ns(vm_clock) + n->tx_timeout);
            } else {
                qemu_bh_schedule(q->tx_bh);
            }
        } else {
            if (q->tx_timer) {
                qemu_del_timer(q->tx_timer);
            } else {
                qemu_bh_cancel(q->tx_bh);
            }
        }
    }
}

static void virtio_net_set_link_status(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(&n->vdev);

    virtio_net_set_status(&n->vdev, n->vdev.status);
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = to_virtio_net(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    /* multiqueue is disabled by default */
    n->curr_queues = 1;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac));
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static void peer_test_vnet_hdr(VirtIONet *n)
{
    NetClientState *nc = qemu_get_queue(n->nic);

    if (!nc->peer) {
        return;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return;
    }

    n->has_vnet_hdr = tap_has_vnet_hdr(nc->peer);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = tap_has_ufo(qemu_get_queue(n->nic)->peer);

    return n->has_ufo;
}
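
/*
 * The header the guest sees is virtio_net_hdr_mrg_rxbuf once mergeable rx
 * buffers are negotiated, plain virtio_net_hdr otherwise. If the tap peer
 * accepts the same header length, host and guest headers match and the tx
 * path can pass buffers through without rebuilding the header.
 */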

static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs)
{
    int i;
    NetClientState *nc;

    n->mergeable_rx_bufs = mergeable_rx_bufs;

    n->guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    for (i = 0; i < n->max_queues; i++) {
        nc = qemu_get_subqueue(n->nic, i);

        if (peer_has_vnet_hdr(n) &&
            tap_has_vnet_hdr_len(nc->peer, n->guest_hdr_len)) {
            tap_set_vnet_hdr_len(nc->peer, n->guest_hdr_len);
            n->host_hdr_len = n->guest_hdr_len;
        }
    }
}

static int peer_attach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_enable(nc->peer);
}

static int peer_detach(VirtIONet *n, int index)
{
    NetClientState *nc = qemu_get_subqueue(n->nic, index);

    if (!nc->peer) {
        return 0;
    }

    if (nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return 0;
    }

    return tap_disable(nc->peer);
}

static void virtio_net_set_queues(VirtIONet *n)
{
    int i;

    for (i = 0; i < n->max_queues; i++) {
        if (i < n->curr_queues) {
            assert(!peer_attach(n, i));
        } else {
            assert(!peer_detach(n, i));
        }
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue, int ctrl);

static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = to_virtio_net(vdev);
    NetClientState *nc = qemu_get_queue(n->nic);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (!peer_has_vnet_hdr(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
        return features;
    }
    if (!tap_get_vhost_net(nc->peer)) {
        return features;
    }
    return vhost_net_get_features(tap_get_vhost_net(nc->peer), features);
}

static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25. It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = to_virtio_net(vdev);
    int i;

    virtio_net_set_multiqueue(n, !!(features & (1 << VIRTIO_NET_F_MQ)),
                              !!(features & (1 << VIRTIO_NET_F_CTRL_VQ)));

    virtio_net_set_mrg_rx_bufs(n, !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF)));

    if (n->has_vnet_hdr) {
        tap_set_offload(qemu_get_subqueue(n->nic, 0)->peer,
                        (features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                        (features >> VIRTIO_NET_F_GUEST_ECN) & 1,
                        (features >> VIRTIO_NET_F_GUEST_UFO) & 1);
    }

    for (i = 0; i < n->max_queues; i++) {
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        if (!nc->peer || nc->peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
            continue;
        }
        if (!tap_get_vhost_net(nc->peer)) {
            continue;
        }
        vhost_net_ack_features(tap_get_vhost_net(nc->peer), features);
    }
}
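
/*
 * Control virtqueue command handlers follow. Each one consumes a command
 * payload from the guest and returns VIRTIO_NET_OK or VIRTIO_NET_ERR; the
 * resulting status byte is written back to the guest by
 * virtio_net_handle_ctrl().
 */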

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     struct iovec *iov, unsigned int iov_cnt)
{
    uint8_t on;
    size_t s;

    s = iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
    if (s != sizeof(on)) {
        return VIRTIO_NET_ERR;
    }

    if (cmd == VIRTIO_NET_CTRL_RX_PROMISC) {
        n->promisc = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLMULTI) {
        n->allmulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_ALLUNI) {
        n->alluni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOMULTI) {
        n->nomulti = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOUNI) {
        n->nouni = on;
    } else if (cmd == VIRTIO_NET_CTRL_RX_NOBCAST) {
        n->nobcast = on;
    } else {
        return VIRTIO_NET_ERR;
    }

    return VIRTIO_NET_OK;
}
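
/*
 * MAC filter table layout: unicast entries occupy slots
 * [0, mac_table.first_multi), multicast entries the rest up to
 * mac_table.in_use. If either class overflows MAC_TABLE_ENTRIES, the
 * corresponding overflow flag is set and receive_filter() lets every
 * address of that class through.
 */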

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 struct iovec *iov, unsigned int iov_cnt)
{
    struct virtio_net_ctrl_mac mac_data;
    size_t s;

    if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
        if (iov_size(iov, iov_cnt) != sizeof(n->mac)) {
            return VIRTIO_NET_ERR;
        }
        s = iov_to_buf(iov, iov_cnt, 0, &n->mac, sizeof(n->mac));
        assert(s == sizeof(n->mac));
        qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac);
        return VIRTIO_NET_OK;
    }

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
        return VIRTIO_NET_ERR;
    }

    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.uni_overflow = 0;
    n->mac_table.multi_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        return VIRTIO_NET_ERR;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN > iov_size(iov, iov_cnt)) {
        return VIRTIO_NET_ERR;
    }

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0, n->mac_table.macs,
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            return VIRTIO_NET_ERR;
        }
        n->mac_table.in_use += mac_data.entries;
    } else {
        n->mac_table.uni_overflow = 1;
    }

    iov_discard_front(&iov, &iov_cnt, mac_data.entries * ETH_ALEN);

    n->mac_table.first_multi = n->mac_table.in_use;

    s = iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
                   sizeof(mac_data.entries));
    mac_data.entries = ldl_p(&mac_data.entries);
    if (s != sizeof(mac_data.entries)) {
        return VIRTIO_NET_ERR;
    }
    iov_discard_front(&iov, &iov_cnt, s);

    if (mac_data.entries * ETH_ALEN != iov_size(iov, iov_cnt)) {
        return VIRTIO_NET_ERR;
    }

    if (n->mac_table.in_use + mac_data.entries <= MAC_TABLE_ENTRIES) {
        s = iov_to_buf(iov, iov_cnt, 0,
                       &n->mac_table.macs[n->mac_table.in_use * ETH_ALEN],
                       mac_data.entries * ETH_ALEN);
        if (s != mac_data.entries * ETH_ALEN) {
            return VIRTIO_NET_ERR;
        }
        n->mac_table.in_use += mac_data.entries;
    } else {
        n->mac_table.multi_overflow = 1;
    }

    return VIRTIO_NET_OK;
}
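
/*
 * The VLAN filter is a 4096-bit bitmap (MAX_VLAN >> 3 bytes): VLAN id
 * 'vid' lives in bit (vid & 0x1f) of 32-bit word vid >> 5.
 */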

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        struct iovec *iov, unsigned int iov_cnt)
{
    uint16_t vid;
    size_t s;

    s = iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
    vid = lduw_p(&vid);
    if (s != sizeof(vid)) {
        return VIRTIO_NET_ERR;
    }

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd,
                                VirtQueueElement *elem)
{
    struct virtio_net_ctrl_mq s;

    if (elem->out_num != 2 ||
        elem->out_sg[1].iov_len != sizeof(struct virtio_net_ctrl_mq)) {
        error_report("virtio-net ctrl invalid steering command");
        return VIRTIO_NET_ERR;
    }

    if (cmd != VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) {
        return VIRTIO_NET_ERR;
    }

    memcpy(&s, elem->out_sg[1].iov_base, sizeof(struct virtio_net_ctrl_mq));

    if (s.virtqueue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
        s.virtqueue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
        s.virtqueue_pairs > n->max_queues ||
        !n->multiqueue) {
        return VIRTIO_NET_ERR;
    }

    n->curr_queues = s.virtqueue_pairs;
    /* stop the backend before changing the number of queues to avoid handling a
     * disabled queue */
    virtio_net_set_status(&n->vdev, n->vdev.status);
    virtio_net_set_queues(n);

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;
    size_t s;
    struct iovec *iov;
    unsigned int iov_cnt;

    while (virtqueue_pop(vq, &elem)) {
        if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
            iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        iov = elem.out_sg;
        iov_cnt = elem.out_num;
        s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
        iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
        if (s != sizeof(ctrl)) {
            status = VIRTIO_NET_ERR;
        } else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
            status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
        } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
            status = virtio_net_handle_mq(n, ctrl.cmd, &elem);
        }

        s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
        assert(s == sizeof(status));

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);
    int queue_index = vq2q(virtio_get_queue_index(vq));

    qemu_flush_queued_packets(qemu_get_subqueue(n->nic, queue_index));
}

static int virtio_net_can_receive(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    if (!n->vdev.vm_running) {
        return 0;
    }

    if (nc->queue_index >= n->curr_queues) {
        return 0;
    }

    if (!virtio_queue_ready(q->rx_vq) ||
        !(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return 0;
    }

    return 1;
}

static int virtio_net_has_buffers(VirtIONetQueue *q, int bufsize)
{
    VirtIONet *n = q->n;

    if (virtio_queue_empty(q->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(q->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(q->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(q->rx_vq, bufsize, 0))) {
            return 0;
        }
    }

    virtio_queue_set_notification(q->rx_vq, 0);
    return 1;
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums. This
 * causes dhclient to get upset. Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums. This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
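
/*
 * Note: the byte offsets tested below assume an untagged Ethernet frame
 * with a 20-byte IPv4 header: bytes 12-13 are the ethertype, byte 23 the
 * IP protocol, and bytes 34-35 the UDP source port.
 */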

static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        net_checksum_calculate(buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static void receive_header(VirtIONet *n, const struct iovec *iov, int iov_cnt,
                           const void *buf, size_t size)
{
    if (n->has_vnet_hdr) {
        /* FIXME this cast is evil */
        void *wbuf = (void *)buf;
        work_around_broken_dhclient(wbuf, wbuf + n->host_hdr_len,
                                    size - n->host_hdr_len);
        iov_from_buf(iov, iov_cnt, 0, buf, sizeof(struct virtio_net_hdr));
    } else {
        struct virtio_net_hdr hdr = {
            .flags = 0,
            .gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        iov_from_buf(iov, iov_cnt, 0, &hdr, sizeof hdr);
    }
}

static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    ptr += n->host_hdr_len;

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}
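
/*
 * Receive path: the frame is scattered into as many rx descriptors as it
 * needs. With mergeable rx buffers, the iov covering the num_buffers field
 * of the first header is remembered in mhdr_sg so the final descriptor
 * count can be patched in once the loop knows how many buffers were used.
 */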

static ssize_t
virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);
    struct iovec mhdr_sg[VIRTQUEUE_MAX_SIZE];
    struct virtio_net_hdr_mrg_rxbuf mhdr;
    unsigned mhdr_cnt = 0;
    size_t offset, i, guest_offset;

    if (!virtio_net_can_receive(nc)) {
        return -1;
    }

    /* hdr_len refers to the header we supply to the guest */
    if (!virtio_net_has_buffers(q, size + n->guest_hdr_len - n->host_hdr_len)) {
        return 0;
    }

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        const struct iovec *sg = elem.in_sg;

        total = 0;

        if (virtqueue_pop(q->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         n->guest_hdr_len, n->host_hdr_len, n->vdev.guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (i == 0) {
            assert(offset == 0);
            if (n->mergeable_rx_bufs) {
                mhdr_cnt = iov_copy(mhdr_sg, ARRAY_SIZE(mhdr_sg),
                                    sg, elem.in_num,
                                    offsetof(typeof(mhdr), num_buffers),
                                    sizeof(mhdr.num_buffers));
            }

            receive_header(n, sg, elem.in_num, buf, size);
            offset = n->host_hdr_len;
            total += n->guest_hdr_len;
            guest_offset = n->guest_hdr_len;
        } else {
            guest_offset = 0;
        }

        /* copy in packet. ugh */
        len = iov_from_buf(sg, elem.in_num, guest_offset,
                           buf + offset, size - offset);
        total += len;
        offset += len;

        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
#if 0
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, n->guest_hdr_len, n->host_hdr_len);
#endif
            return size;
        }

        /* signal other side */
        virtqueue_fill(q->rx_vq, &elem, total, i++);
    }

    if (mhdr_cnt) {
        stw_p(&mhdr.num_buffers, i);
        iov_from_buf(mhdr_sg, mhdr_cnt,
                     0,
                     &mhdr.num_buffers, sizeof mhdr.num_buffers);
    }

    virtqueue_flush(q->rx_vq, i);
    virtio_notify(&n->vdev, q->rx_vq);

    return size;
}

static int32_t virtio_net_flush_tx(VirtIONetQueue *q);

static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);
    VirtIONetQueue *q = virtio_net_get_subqueue(nc);

    virtqueue_push(q->tx_vq, &q->async_tx.elem, 0);
    virtio_notify(&n->vdev, q->tx_vq);

    q->async_tx.elem.out_num = q->async_tx.len = 0;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}
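
/*
 * Flush pending tx descriptors to the backend. Returns the number of
 * packets sent, or -EBUSY if an asynchronous send is still in flight;
 * in the latter case virtio_net_tx_complete() restarts the flush.
 */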

static int32_t virtio_net_flush_tx(VirtIONetQueue *q)
{
    VirtIONet *n = q->n;
    VirtQueueElement elem;
    int32_t num_packets = 0;
    int queue_index = vq2q(virtio_get_queue_index(q->tx_vq));
    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    assert(n->vdev.vm_running);

    if (q->async_tx.elem.out_num) {
        virtio_queue_set_notification(q->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(q->tx_vq, &elem)) {
        ssize_t ret, len;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        if (out_num < 1) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        /*
         * If host wants to see the guest header as is, we can
         * pass it on unchanged. Otherwise, copy just the parts
         * that host is interested in.
         */
        assert(n->host_hdr_len <= n->guest_hdr_len);
        if (n->host_hdr_len != n->guest_hdr_len) {
            unsigned sg_num = iov_copy(sg, ARRAY_SIZE(sg),
                                       out_sg, out_num,
                                       0, n->host_hdr_len);
            sg_num += iov_copy(sg + sg_num, ARRAY_SIZE(sg) - sg_num,
                               out_sg, out_num,
                               n->guest_hdr_len, -1);
            out_num = sg_num;
            out_sg = sg;
        }

        len = n->guest_hdr_len;

        ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index),
                                      out_sg, out_num, virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(q->tx_vq, 0);
            q->async_tx.elem = elem;
            q->async_tx.len = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(q->tx_vq, &elem, 0);
        virtio_notify(&n->vdev, q->tx_vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}
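
/*
 * Two tx mitigation strategies: with tx=timer, packets are batched until
 * tx_timeout ns have passed; with tx=bh (the default), a bottom half is
 * scheduled from the queue notification. Each queue pair uses exactly one
 * of q->tx_timer and q->tx_bh.
 */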

static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    /* This happens when device was stopped but VCPU wasn't. */
    if (!n->vdev.vm_running) {
        q->tx_waiting = 1;
        return;
    }

    if (q->tx_waiting) {
        virtio_queue_set_notification(vq, 1);
        qemu_del_timer(q->tx_timer);
        q->tx_waiting = 0;
        virtio_net_flush_tx(q);
    } else {
        qemu_mod_timer(q->tx_timer,
                       qemu_get_clock_ns(vm_clock) + n->tx_timeout);
        q->tx_waiting = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);
    VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))];

    if (unlikely(q->tx_waiting)) {
        return;
    }
    q->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!n->vdev.vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(q->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    assert(n->vdev.vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    virtio_queue_set_notification(q->tx_vq, 1);
    virtio_net_flush_tx(q);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONetQueue *q = opaque;
    VirtIONet *n = q->n;
    int32_t ret;

    assert(n->vdev.vm_running);

    q->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)))
        return;

    ret = virtio_net_flush_tx(q);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking. If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(q->tx_vq, 1);
    if (virtio_net_flush_tx(q) > 0) {
        virtio_queue_set_notification(q->tx_vq, 0);
        qemu_bh_schedule(q->tx_bh);
        q->tx_waiting = 1;
    }
}

static void virtio_net_set_multiqueue(VirtIONet *n, int multiqueue, int ctrl)
{
    VirtIODevice *vdev = &n->vdev;
    int i, max = multiqueue ? n->max_queues : 1;

    n->multiqueue = multiqueue;

    for (i = 2; i <= n->max_queues * 2 + 1; i++) {
        virtio_del_queue(vdev, i);
    }

    for (i = 1; i < max; i++) {
        n->vqs[i].rx_vq = virtio_add_queue(vdev, 256, virtio_net_handle_rx);
        if (n->vqs[i].tx_timer) {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_timer);
            n->vqs[i].tx_timer = qemu_new_timer_ns(vm_clock,
                                                   virtio_net_tx_timer,
                                                   &n->vqs[i]);
        } else {
            n->vqs[i].tx_vq =
                virtio_add_queue(vdev, 256, virtio_net_handle_tx_bh);
            n->vqs[i].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[i]);
        }

        n->vqs[i].tx_waiting = 0;
        n->vqs[i].n = n;
    }

    if (ctrl) {
        n->ctrl_vq = virtio_add_queue(vdev, 64, virtio_net_handle_ctrl);
    }

    virtio_net_set_queues(n);
}
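
/*
 * Migration support: state is saved in VIRTIO_NET_VM_VERSION format;
 * virtio_net_load() accepts any version back to 2 and reads each group of
 * fields only if the incoming stream is new enough to contain it.
 */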

static void virtio_net_save(QEMUFile *f, void *opaque)
{
    int i;
    VirtIONet *n = opaque;

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(&n->vdev, f);

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->vqs[0].tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
    if (n->max_queues > 1) {
        qemu_put_be16(f, n->max_queues);
        qemu_put_be16(f, n->curr_queues);
        for (i = 1; i < n->curr_queues; i++) {
            qemu_put_be32(f, n->vqs[i].tx_waiting);
        }
    }
}

static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    int ret, i, link_down;

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    ret = virtio_load(&n->vdev, f);
    if (ret) {
        return ret;
    }

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->vqs[0].tx_waiting = qemu_get_be32(f);

    virtio_net_set_mrg_rx_bufs(n, qemu_get_be32(f));

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else if (n->mac_table.in_use) {
            uint8_t *buf = g_malloc0(n->mac_table.in_use * ETH_ALEN);
            qemu_get_buffer(f, buf, n->mac_table.in_use * ETH_ALEN);
            g_free(buf);
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }
    }

    if (n->has_vnet_hdr) {
        tap_set_offload(qemu_get_queue(n->nic)->peer,
                        (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                        (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                        (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                        (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_ECN) & 1,
                        (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_UFO) & 1);
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    if (n->max_queues > 1) {
        if (n->max_queues != qemu_get_be16(f)) {
            error_report("virtio-net: different max_queues");
            return -1;
        }

        n->curr_queues = qemu_get_be16(f);
        for (i = 1; i < n->curr_queues; i++) {
            n->vqs[i].tx_waiting = qemu_get_be32(f);
        }
    }

    virtio_net_set_queues(n);

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    /* nc.link_down can't be migrated, so infer link_down according
     * to link status bit in n->status */
    link_down = (n->status & VIRTIO_NET_S_LINK_UP) == 0;
    for (i = 0; i < n->max_queues; i++) {
        qemu_get_subqueue(n->nic, i)->link_down = link_down;
    }

    return 0;
}

static void virtio_net_cleanup(NetClientState *nc)
{
    VirtIONet *n = qemu_get_nic_opaque(nc);

    n->nic = NULL;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_OPTIONS_KIND_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
};

static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx)
{
    VirtIONet *n = to_virtio_net(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    return vhost_net_virtqueue_pending(tap_get_vhost_net(nc->peer), idx);
}

static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx,
                                           bool mask)
{
    VirtIONet *n = to_virtio_net(vdev);
    NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx));
    assert(n->vhost_started);
    vhost_net_virtqueue_mask(tap_get_vhost_net(nc->peer),
                             vdev, idx, mask);
}
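
/*
 * Device construction: config space is sized from the offered host
 * features (see feature_sizes), queue pair 0 and the control vq are
 * created here, and further pairs are added or removed later by
 * virtio_net_set_multiqueue().
 */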

VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
                              virtio_net_conf *net, uint32_t host_features)
{
    VirtIONet *n;
    int i, config_size = 0;

    for (i = 0; feature_sizes[i].flags != 0; i++) {
        if (host_features & feature_sizes[i].flags) {
            config_size = MAX(feature_sizes[i].end, config_size);
        }
    }

    n = (VirtIONet *)virtio_common_init("virtio-net", VIRTIO_ID_NET,
                                        config_size, sizeof(VirtIONet));

    n->config_size = config_size;
    n->vdev.get_config = virtio_net_get_config;
    n->vdev.set_config = virtio_net_set_config;
    n->vdev.get_features = virtio_net_get_features;
    n->vdev.set_features = virtio_net_set_features;
    n->vdev.bad_features = virtio_net_bad_features;
    n->vdev.reset = virtio_net_reset;
    n->vdev.set_status = virtio_net_set_status;
    n->vdev.guest_notifier_mask = virtio_net_guest_notifier_mask;
    n->vdev.guest_notifier_pending = virtio_net_guest_notifier_pending;
    n->max_queues = MAX(conf->queues, 1);
    n->vqs = g_malloc0(sizeof(VirtIONetQueue) * n->max_queues);
    n->vqs[0].rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
    n->curr_queues = 1;
    n->vqs[0].n = n;
    n->tx_timeout = net->txtimer;

    if (net->tx && strcmp(net->tx, "timer") && strcmp(net->tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     net->tx);
        error_report("Defaulting to \"bh\"");
    }

    if (net->tx && !strcmp(net->tx, "timer")) {
        n->vqs[0].tx_vq = virtio_add_queue(&n->vdev, 256,
                                           virtio_net_handle_tx_timer);
        n->vqs[0].tx_timer = qemu_new_timer_ns(vm_clock, virtio_net_tx_timer,
                                               &n->vqs[0]);
    } else {
        n->vqs[0].tx_vq = virtio_add_queue(&n->vdev, 256,
                                           virtio_net_handle_tx_bh);
        n->vqs[0].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[0]);
    }
    n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&conf->macaddr);
    memcpy(&n->mac[0], &conf->macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;

    n->nic = qemu_new_nic(&net_virtio_info, conf,
                          object_get_typename(OBJECT(dev)), dev->id, n);
    peer_test_vnet_hdr(n);
    if (peer_has_vnet_hdr(n)) {
        for (i = 0; i < n->max_queues; i++) {
            tap_using_vnet_hdr(qemu_get_subqueue(n->nic, i)->peer, true);
        }
        n->host_hdr_len = sizeof(struct virtio_net_hdr);
    } else {
        n->host_hdr_len = 0;
    }

    qemu_format_nic_info_str(qemu_get_queue(n->nic), conf->macaddr.a);

    n->vqs[0].tx_waiting = 0;
    n->tx_burst = net->txburst;
    virtio_net_set_mrg_rx_bufs(n, 0);
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = g_malloc0(MAX_VLAN >> 3);

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);

    add_boot_device_path(conf->bootindex, dev, "/ethernet-phy@0");

    return &n->vdev;
}

void virtio_net_exit(VirtIODevice *vdev)
{
    VirtIONet *n = DO_UPCAST(VirtIONet, vdev, vdev);
    int i;

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    unregister_savevm(n->qdev, "virtio-net", n);

    g_free(n->mac_table.macs);
    g_free(n->vlans);

    for (i = 0; i < n->max_queues; i++) {
        VirtIONetQueue *q = &n->vqs[i];
        NetClientState *nc = qemu_get_subqueue(n->nic, i);

        qemu_purge_queued_packets(nc);

        if (q->tx_timer) {
            qemu_del_timer(q->tx_timer);
            qemu_free_timer(q->tx_timer);
        } else {
            qemu_bh_delete(q->tx_bh);
        }
    }

    g_free(n->vqs);
    qemu_del_nic(n->nic);
    virtio_cleanup(&n->vdev);
}