/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Anthony Liguori <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include "iov.h"
#include "virtio.h"
#include "net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu-error.h"
#include "qemu-timer.h"
#include "virtio-net.h"
#include "vhost_net.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

typedef struct VirtIONet
{
    VirtIODevice vdev;
    uint8_t mac[ETH_ALEN];
    uint16_t status;
    VirtQueue *rx_vq;
    VirtQueue *tx_vq;
    VirtQueue *ctrl_vq;
    NICState *nic;
    QEMUTimer *tx_timer;
    int tx_timer_active;
    uint32_t has_vnet_hdr;
    uint8_t has_ufo;
    struct {
        VirtQueueElement elem;
        ssize_t len;
    } async_tx;
    int mergeable_rx_bufs;
    uint8_t promisc;
    uint8_t allmulti;
    uint8_t alluni;
    uint8_t nomulti;
    uint8_t nouni;
    uint8_t nobcast;
    uint8_t vhost_started;
    VMChangeStateEntry *vmstate;
    struct {
        int in_use;
        int first_multi;
        uint8_t multi_overflow;
        uint8_t uni_overflow;
        uint8_t *macs;
    } mac_table;
    uint32_t *vlans;
} VirtIONet;

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static VirtIONet *to_virtio_net(VirtIODevice *vdev)
{
    return (VirtIONet *)vdev;
}

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_config netcfg;

    netcfg.status = n->status;
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, sizeof(netcfg));
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_config netcfg;

    memcpy(&netcfg, config, sizeof(netcfg));

    if (memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(&n->nic->nc, n->mac);
    }
}

static void virtio_net_set_link_status(VLANClientState *nc)
{
    VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(&n->vdev);
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = to_virtio_net(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;
    if (n->vhost_started) {
        vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev);
        n->vhost_started = 0;
    }

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

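/* Probe the tap peer for virtio-net header and UFO support.  The results are
 * cached in n->has_vnet_hdr / n->has_ufo and consulted when the feature bits
 * are negotiated below. */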
static int peer_has_vnet_hdr(VirtIONet *n)
{
    if (!n->nic->nc.peer)
        return 0;

    if (n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP)
        return 0;

    n->has_vnet_hdr = tap_has_vnet_hdr(n->nic->nc.peer);

    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = tap_has_ufo(n->nic->nc.peer);

    return n->has_ufo;
}

static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = to_virtio_net(vdev);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (peer_has_vnet_hdr(n)) {
        tap_using_vnet_hdr(n->nic->nc.peer, 1);
    } else {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!n->nic->nc.peer ||
        n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
        return features;
    }
    if (!tap_get_vhost_net(n->nic->nc.peer)) {
        return features;
    }
    return vhost_net_get_features(tap_get_vhost_net(n->nic->nc.peer), features);
}

static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25. It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = to_virtio_net(vdev);

    n->mergeable_rx_bufs = !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF));

    if (n->has_vnet_hdr) {
        tap_set_offload(n->nic->nc.peer,
                        (features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                        (features >> VIRTIO_NET_F_GUEST_ECN) & 1,
                        (features >> VIRTIO_NET_F_GUEST_UFO) & 1);
    }
    if (!n->nic->nc.peer ||
        n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
        return;
    }
    if (!tap_get_vhost_net(n->nic->nc.peer)) {
        return;
    }
    vhost_net_ack_features(tap_get_vhost_net(n->nic->nc.peer), features);
}

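/* Control virtqueue command handlers: each handler receives the command byte
 * plus the remaining out_sg[] payload of a VIRTIO_NET_CTRL_* request and
 * returns VIRTIO_NET_OK or VIRTIO_NET_ERR to be written back to the guest. */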
static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     VirtQueueElement *elem)
{
    uint8_t on;

    if (elem->out_num != 2 || elem->out_sg[1].iov_len != sizeof(on)) {
        fprintf(stderr, "virtio-net ctrl invalid rx mode command\n");
        exit(1);
    }

    on = ldub_p(elem->out_sg[1].iov_base);

    if (cmd == VIRTIO_NET_CTRL_RX_MODE_PROMISC)
        n->promisc = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_ALLMULTI)
        n->allmulti = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_ALLUNI)
        n->alluni = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_NOMULTI)
        n->nomulti = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_NOUNI)
        n->nouni = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_NOBCAST)
        n->nobcast = on;
    else
        return VIRTIO_NET_ERR;

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 VirtQueueElement *elem)
{
    struct virtio_net_ctrl_mac mac_data;

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET || elem->out_num != 3 ||
        elem->out_sg[1].iov_len < sizeof(mac_data) ||
        elem->out_sg[2].iov_len < sizeof(mac_data))
        return VIRTIO_NET_ERR;

    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.uni_overflow = 0;
    n->mac_table.multi_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);

    mac_data.entries = ldl_le_p(elem->out_sg[1].iov_base);

    if (sizeof(mac_data.entries) +
        (mac_data.entries * ETH_ALEN) > elem->out_sg[1].iov_len)
        return VIRTIO_NET_ERR;

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        memcpy(n->mac_table.macs, elem->out_sg[1].iov_base + sizeof(mac_data),
               mac_data.entries * ETH_ALEN);
        n->mac_table.in_use += mac_data.entries;
    } else {
        n->mac_table.uni_overflow = 1;
    }

    n->mac_table.first_multi = n->mac_table.in_use;

    mac_data.entries = ldl_le_p(elem->out_sg[2].iov_base);

    if (sizeof(mac_data.entries) +
        (mac_data.entries * ETH_ALEN) > elem->out_sg[2].iov_len)
        return VIRTIO_NET_ERR;

    if (mac_data.entries) {
        if (n->mac_table.in_use + mac_data.entries <= MAC_TABLE_ENTRIES) {
            memcpy(n->mac_table.macs + (n->mac_table.in_use * ETH_ALEN),
                   elem->out_sg[2].iov_base + sizeof(mac_data),
                   mac_data.entries * ETH_ALEN);
            n->mac_table.in_use += mac_data.entries;
        } else {
            n->mac_table.multi_overflow = 1;
        }
    }

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        VirtQueueElement *elem)
{
    uint16_t vid;

    if (elem->out_num != 2 || elem->out_sg[1].iov_len != sizeof(vid)) {
        fprintf(stderr, "virtio-net ctrl invalid vlan command\n");
        return VIRTIO_NET_ERR;
    }

    vid = lduw_le_p(elem->out_sg[1].iov_base);

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;

    while (virtqueue_pop(vq, &elem)) {
        if ((elem.in_num < 1) || (elem.out_num < 1)) {
            fprintf(stderr, "virtio-net ctrl missing headers\n");
            exit(1);
        }

        if (elem.out_sg[0].iov_len < sizeof(ctrl) ||
            elem.in_sg[elem.in_num - 1].iov_len < sizeof(status)) {
            fprintf(stderr, "virtio-net ctrl header not in correct element\n");
            exit(1);
        }

        ctrl.class = ldub_p(elem.out_sg[0].iov_base);
        ctrl.cmd = ldub_p(elem.out_sg[0].iov_base + sizeof(ctrl.class));

        if (ctrl.class == VIRTIO_NET_CTRL_RX_MODE)
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, &elem);
        else if (ctrl.class == VIRTIO_NET_CTRL_MAC)
            status = virtio_net_handle_mac(n, ctrl.cmd, &elem);
        else if (ctrl.class == VIRTIO_NET_CTRL_VLAN)
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, &elem);

        stb_p(elem.in_sg[elem.in_num - 1].iov_base, status);

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);

    qemu_flush_queued_packets(&n->nic->nc);

    /* We now have RX buffers, signal to the IO thread to break out of the
     * select to re-poll the tap file descriptor */
}

static int virtio_net_can_receive(VLANClientState *nc)
{
    VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;

    if (!virtio_queue_ready(n->rx_vq) ||
        !(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return 0;

    return 1;
}

static int virtio_net_has_buffers(VirtIONet *n, int bufsize)
{
    if (virtio_queue_empty(n->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(n->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(n->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(n->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(n->rx_vq, bufsize, 0)))
            return 0;
    }

    virtio_queue_set_notification(n->rx_vq, 0);
    return 1;
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums. This
 * causes dhclient to get upset. Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums. This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        const uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        /* FIXME this cast is evil */
        net_checksum_calculate((uint8_t *)buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static int receive_header(VirtIONet *n, struct iovec *iov, int iovcnt,
                          const void *buf, size_t size, size_t hdr_len)
{
    struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)iov[0].iov_base;
    int offset = 0;

    hdr->flags = 0;
    hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

    if (n->has_vnet_hdr) {
        memcpy(hdr, buf, sizeof(*hdr));
        offset = sizeof(*hdr);
        work_around_broken_dhclient(hdr, buf + offset, size - offset);
    }

    /* We only ever receive a struct virtio_net_hdr from the tapfd,
     * but we may be passing along a larger header to the guest.
     */
    iov[0].iov_base += hdr_len;
    iov[0].iov_len -= hdr_len;

    return offset;
}

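/* Decide whether an incoming frame should be delivered to the guest:
 * promiscuous mode accepts everything, then the VLAN bitmap is checked,
 * then multicast/unicast frames are matched against the rx-mode flags and
 * the MAC filter table programmed through the control queue. */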
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    if (n->has_vnet_hdr) {
        ptr += sizeof(struct virtio_net_hdr);
    }

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

static ssize_t virtio_net_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
{
    VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
    struct virtio_net_hdr_mrg_rxbuf *mhdr = NULL;
    size_t hdr_len, offset, i;

    if (!virtio_net_can_receive(&n->nic->nc))
        return -1;

    if (!virtio_net_has_buffers(n, size))
        return 0;

    if (!receive_filter(n, buf, size))
        return size;

    /* hdr_len refers to the header we supply to the guest */
    hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        len = total = 0;

        if ((i != 0 && !n->mergeable_rx_bufs) ||
            virtqueue_pop(n->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            fprintf(stderr, "virtio-net truncating packet\n");
            exit(1);
        }

        if (elem.in_num < 1) {
            fprintf(stderr, "virtio-net receive queue contains no in buffers\n");
            exit(1);
        }

        if (!n->mergeable_rx_bufs && elem.in_sg[0].iov_len != hdr_len) {
            fprintf(stderr, "virtio-net header not in first element\n");
            exit(1);
        }

        memcpy(&sg, &elem.in_sg[0], sizeof(sg[0]) * elem.in_num);

        if (i == 0) {
            if (n->mergeable_rx_bufs)
                mhdr = (struct virtio_net_hdr_mrg_rxbuf *)sg[0].iov_base;

            offset += receive_header(n, sg, elem.in_num,
                                     buf + offset, size - offset, hdr_len);
            total += hdr_len;
        }

        /* copy in packet. ugh */
        len = iov_from_buf(sg, elem.in_num,
                           buf + offset, size - offset);
        total += len;

        /* signal other side */
        virtqueue_fill(n->rx_vq, &elem, total, i++);

        offset += len;
    }

    if (mhdr)
        mhdr->num_buffers = i;

    virtqueue_flush(n->rx_vq, i);
    virtio_notify(&n->vdev, n->rx_vq);

    return size;
}

static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq);

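/* Completion callback for packets submitted with qemu_sendv_packet_async():
 * push the deferred element back to the TX ring, re-enable queue
 * notification and try to flush anything queued up in the meantime. */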
static void virtio_net_tx_complete(VLANClientState *nc, ssize_t len)
{
    VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;

    virtqueue_push(n->tx_vq, &n->async_tx.elem, n->async_tx.len);
    virtio_notify(&n->vdev, n->tx_vq);

    n->async_tx.elem.out_num = n->async_tx.len = 0;

    virtio_queue_set_notification(n->tx_vq, 1);
    virtio_net_flush_tx(n, n->tx_vq);
}

static void virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
{
    VirtQueueElement elem;

    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    if (n->async_tx.elem.out_num) {
        virtio_queue_set_notification(n->tx_vq, 0);
        return;
    }

    while (virtqueue_pop(vq, &elem)) {
        ssize_t ret, len = 0;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        unsigned hdr_len;

        /* hdr_len refers to the header received from the guest */
        hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);

        if (out_num < 1 || out_sg->iov_len != hdr_len) {
            fprintf(stderr, "virtio-net header not in first element\n");
            exit(1);
        }

        /* ignore the header if GSO is not supported */
        if (!n->has_vnet_hdr) {
            out_num--;
            out_sg++;
            len += hdr_len;
        } else if (n->mergeable_rx_bufs) {
            /* tapfd expects a struct virtio_net_hdr */
            hdr_len -= sizeof(struct virtio_net_hdr);
            out_sg->iov_len -= hdr_len;
            len += hdr_len;
        }

        ret = qemu_sendv_packet_async(&n->nic->nc, out_sg, out_num,
                                      virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(n->tx_vq, 0);
            n->async_tx.elem = elem;
            n->async_tx.len  = len;
            return;
        }

        len += ret;

        virtqueue_push(vq, &elem, len);
        virtio_notify(&n->vdev, vq);
    }
}

static void virtio_net_handle_tx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);

    if (n->tx_timer_active) {
        virtio_queue_set_notification(vq, 1);
        qemu_del_timer(n->tx_timer);
        n->tx_timer_active = 0;
        virtio_net_flush_tx(n, vq);
    } else {
        qemu_mod_timer(n->tx_timer,
                       qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL);
        n->tx_timer_active = 1;
        virtio_queue_set_notification(vq, 0);
    }
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONet *n = opaque;

    n->tx_timer_active = 0;

    /* Just in case the driver is not ready any more */
    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    virtio_queue_set_notification(n->tx_vq, 1);
    virtio_net_flush_tx(n, n->tx_vq);
}

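/* Migration support.  The save format is versioned by VIRTIO_NET_VM_VERSION;
 * virtio_net_load() below accepts versions 2 and up and only reads the
 * fields that exist in the version being restored. */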
static void virtio_net_save(QEMUFile *f, void *opaque)
{
    VirtIONet *n = opaque;

    if (n->vhost_started) {
        /* TODO: should we really stop the backend?
         * If we don't, it might keep writing to memory. */
        vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), &n->vdev);
        n->vhost_started = 0;
    }
    virtio_save(&n->vdev, f);

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->tx_timer_active);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
}

static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    int i;

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    virtio_load(&n->vdev, f);

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->tx_timer_active = qemu_get_be32(f);
    n->mergeable_rx_bufs = qemu_get_be32(f);

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else if (n->mac_table.in_use) {
            qemu_fseek(f, n->mac_table.in_use * ETH_ALEN, SEEK_CUR);
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }

        if (n->has_vnet_hdr) {
            tap_using_vnet_hdr(n->nic->nc.peer, 1);
            tap_set_offload(n->nic->nc.peer,
                            (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                            (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                            (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                            (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_ECN) & 1,
                            (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_UFO) & 1);
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;

    if (n->tx_timer_active) {
        qemu_mod_timer(n->tx_timer,
                       qemu_get_clock(vm_clock) + TX_TIMER_INTERVAL);
    }

    return 0;
}

static void virtio_net_cleanup(VLANClientState *nc)
{
    VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;

    n->nic = NULL;
}

static NetClientInfo net_virtio_info = {
    .type = NET_CLIENT_TYPE_NIC,
    .size = sizeof(NICState),
    .can_receive = virtio_net_can_receive,
    .receive = virtio_net_receive,
    .cleanup = virtio_net_cleanup,
    .link_status_changed = virtio_net_set_link_status,
};

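/* Start or stop the vhost-net backend when the guest driver toggles
 * VIRTIO_CONFIG_S_DRIVER_OK; if vhost cannot be started we fall back to the
 * userspace virtio implementation. */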
static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = to_virtio_net(vdev);
    if (!n->nic->nc.peer) {
        return;
    }
    if (n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
        return;
    }

    if (!tap_get_vhost_net(n->nic->nc.peer)) {
        return;
    }
    if (!!n->vhost_started == !!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }
    if (status & VIRTIO_CONFIG_S_DRIVER_OK) {
        int r = vhost_net_start(tap_get_vhost_net(n->nic->nc.peer), vdev);
        if (r < 0) {
            fprintf(stderr, "unable to start vhost net: %d: "
                    "falling back on userspace virtio\n", -r);
        } else {
            n->vhost_started = 1;
        }
    } else {
        vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev);
        n->vhost_started = 0;
    }
}

static void virtio_net_vmstate_change(void *opaque, int running, int reason)
{
    VirtIONet *n = opaque;

    if (!running) {
        return;
    }

    /* This is called when vm is started, it will start vhost backend if
     * appropriate e.g. after migration. */
    virtio_net_set_status(&n->vdev, n->vdev.status);
}

VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf)
{
    VirtIONet *n;
    static int virtio_net_id;

    n = (VirtIONet *)virtio_common_init("virtio-net", VIRTIO_ID_NET,
                                        sizeof(struct virtio_net_config),
                                        sizeof(VirtIONet));

    n->vdev.get_config = virtio_net_get_config;
    n->vdev.set_config = virtio_net_set_config;
    n->vdev.get_features = virtio_net_get_features;
    n->vdev.set_features = virtio_net_set_features;
    n->vdev.bad_features = virtio_net_bad_features;
    n->vdev.reset = virtio_net_reset;
    n->vdev.set_status = virtio_net_set_status;
    n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);
    n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx);
    n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&conf->macaddr);
    memcpy(&n->mac[0], &conf->macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;

    n->nic = qemu_new_nic(&net_virtio_info, conf, dev->info->name, dev->id, n);

    qemu_format_nic_info_str(&n->nic->nc, conf->macaddr.a);

    n->tx_timer = qemu_new_timer(vm_clock, virtio_net_tx_timer, n);
    n->tx_timer_active = 0;
    n->mergeable_rx_bufs = 0;
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = qemu_mallocz(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = qemu_mallocz(MAX_VLAN >> 3);

    register_savevm("virtio-net", virtio_net_id++, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);
    n->vmstate = qemu_add_vm_change_state_handler(virtio_net_vmstate_change, n);

    return &n->vdev;
}

void virtio_net_exit(VirtIODevice *vdev)
{
    VirtIONet *n = DO_UPCAST(VirtIONet, vdev, vdev);
    qemu_del_vm_change_state_handler(n->vmstate);

    if (n->vhost_started) {
        vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), vdev);
    }

    qemu_purge_queued_packets(&n->nic->nc);

    unregister_savevm("virtio-net", n);

    qemu_free(n->mac_table.macs);
    qemu_free(n->vlans);

    qemu_del_timer(n->tx_timer);
    qemu_free_timer(n->tx_timer);

    virtio_cleanup(&n->vdev);
    qemu_del_vlan_client(&n->nic->nc);
}