/*
 * Virtio Network Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "iov.h"
#include "virtio.h"
#include "net.h"
#include "net/checksum.h"
#include "net/tap.h"
#include "qemu-error.h"
#include "qemu-timer.h"
#include "virtio-net.h"
#include "vhost_net.h"

#define VIRTIO_NET_VM_VERSION    11

#define MAC_TABLE_ENTRIES    64
#define MAX_VLAN    (1 << 12)   /* Per 802.1Q definition */

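/* The MAC filter table holds up to MAC_TABLE_ENTRIES six-byte entries;
 * VLAN filtering uses a MAX_VLAN-bit bitmap (MAX_VLAN >> 3 bytes), indexed
 * below as vlans[vid >> 5] & (1 << (vid & 0x1f)). */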
typedef struct VirtIONet
{
    VirtIODevice vdev;
    uint8_t mac[ETH_ALEN];
    uint16_t status;
    VirtQueue *rx_vq;
    VirtQueue *tx_vq;
    VirtQueue *ctrl_vq;
    NICState *nic;
    QEMUTimer *tx_timer;
    QEMUBH *tx_bh;
    uint32_t tx_timeout;
    int32_t tx_burst;
    int tx_waiting;
    uint32_t has_vnet_hdr;
    uint8_t has_ufo;
    struct {
        VirtQueueElement elem;
        ssize_t len;
    } async_tx;
    int mergeable_rx_bufs;
    uint8_t promisc;
    uint8_t allmulti;
    uint8_t alluni;
    uint8_t nomulti;
    uint8_t nouni;
    uint8_t nobcast;
    uint8_t vhost_started;
    bool vm_running;
    VMChangeStateEntry *vmstate;
    struct {
        int in_use;
        int first_multi;
        uint8_t multi_overflow;
        uint8_t uni_overflow;
        uint8_t *macs;
    } mac_table;
    uint32_t *vlans;
    DeviceState *qdev;
} VirtIONet;

/* TODO
 * - we could suppress RX interrupt if we were so inclined.
 */

static VirtIONet *to_virtio_net(VirtIODevice *vdev)
{
    return (VirtIONet *)vdev;
}

static void virtio_net_get_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_config netcfg;

    netcfg.status = n->status;
    memcpy(netcfg.mac, n->mac, ETH_ALEN);
    memcpy(config, &netcfg, sizeof(netcfg));
}

static void virtio_net_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_config netcfg;

    memcpy(&netcfg, config, sizeof(netcfg));

    if (memcmp(netcfg.mac, n->mac, ETH_ALEN)) {
        memcpy(n->mac, netcfg.mac, ETH_ALEN);
        qemu_format_nic_info_str(&n->nic->nc, n->mac);
    }
}

static bool virtio_net_started(VirtIONet *n, uint8_t status)
{
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) &&
        (n->status & VIRTIO_NET_S_LINK_UP) && n->vm_running;
}

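/* Bring the vhost-net backend in line with the combined driver status,
 * link state and VM run state: start it when the device becomes usable,
 * stop it otherwise, and fall back to userspace virtio if starting fails. */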
static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
{
    if (!n->nic->nc.peer) {
        return;
    }
    if (n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
        return;
    }

    if (!tap_get_vhost_net(n->nic->nc.peer)) {
        return;
    }
    if (!!n->vhost_started == virtio_net_started(n, status)) {
        return;
    }
    if (!n->vhost_started) {
        int r = vhost_net_start(tap_get_vhost_net(n->nic->nc.peer), &n->vdev);
        if (r < 0) {
            error_report("unable to start vhost net: %d: "
                         "falling back on userspace virtio", -r);
        } else {
            n->vhost_started = 1;
        }
    } else {
        vhost_net_stop(tap_get_vhost_net(n->nic->nc.peer), &n->vdev);
        n->vhost_started = 0;
    }
}

static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
{
    VirtIONet *n = to_virtio_net(vdev);

    virtio_net_vhost_status(n, status);

    if (!n->tx_waiting) {
        return;
    }

    if (virtio_net_started(n, status) && !n->vhost_started) {
        if (n->tx_timer) {
            qemu_mod_timer(n->tx_timer,
                           qemu_get_clock(vm_clock) + n->tx_timeout);
        } else {
            qemu_bh_schedule(n->tx_bh);
        }
    } else {
        if (n->tx_timer) {
            qemu_del_timer(n->tx_timer);
        } else {
            qemu_bh_cancel(n->tx_bh);
        }
    }
}

static void virtio_net_set_link_status(VLANClientState *nc)
{
    VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
    uint16_t old_status = n->status;

    if (nc->link_down)
        n->status &= ~VIRTIO_NET_S_LINK_UP;
    else
        n->status |= VIRTIO_NET_S_LINK_UP;

    if (n->status != old_status)
        virtio_notify_config(&n->vdev);

    virtio_net_set_status(&n->vdev, n->vdev.status);
}

static void virtio_net_reset(VirtIODevice *vdev)
{
    VirtIONet *n = to_virtio_net(vdev);

    /* Reset back to compatibility mode */
    n->promisc = 1;
    n->allmulti = 0;
    n->alluni = 0;
    n->nomulti = 0;
    n->nouni = 0;
    n->nobcast = 0;

    /* Flush any MAC and VLAN filter table state */
    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.multi_overflow = 0;
    n->mac_table.uni_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);
    memset(n->vlans, 0, MAX_VLAN >> 3);
}

static int peer_has_vnet_hdr(VirtIONet *n)
{
    if (!n->nic->nc.peer)
        return 0;

    if (n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP)
        return 0;

    n->has_vnet_hdr = tap_has_vnet_hdr(n->nic->nc.peer);

    return n->has_vnet_hdr;
}

static int peer_has_ufo(VirtIONet *n)
{
    if (!peer_has_vnet_hdr(n))
        return 0;

    n->has_ufo = tap_has_ufo(n->nic->nc.peer);

    return n->has_ufo;
}

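/* Advertise only what the backend can actually do: offload-related feature
 * bits are masked out when the tap peer lacks a vnet header or UFO support,
 * and a vhost backend, if present, gets the final say via
 * vhost_net_get_features(). */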
static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = to_virtio_net(vdev);

    features |= (1 << VIRTIO_NET_F_MAC);

    if (peer_has_vnet_hdr(n)) {
        tap_using_vnet_hdr(n->nic->nc.peer, 1);
    } else {
        features &= ~(0x1 << VIRTIO_NET_F_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_ECN);

        features &= ~(0x1 << VIRTIO_NET_F_GUEST_CSUM);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO4);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_TSO6);
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_ECN);
    }

    if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) {
        features &= ~(0x1 << VIRTIO_NET_F_GUEST_UFO);
        features &= ~(0x1 << VIRTIO_NET_F_HOST_UFO);
    }

    if (!n->nic->nc.peer ||
        n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
        return features;
    }
    if (!tap_get_vhost_net(n->nic->nc.peer)) {
        return features;
    }
    return vhost_net_get_features(tap_get_vhost_net(n->nic->nc.peer), features);
}

static uint32_t virtio_net_bad_features(VirtIODevice *vdev)
{
    uint32_t features = 0;

    /* Linux kernel 2.6.25.  It understood MAC (as everyone must),
     * but also these: */
    features |= (1 << VIRTIO_NET_F_MAC);
    features |= (1 << VIRTIO_NET_F_CSUM);
    features |= (1 << VIRTIO_NET_F_HOST_TSO4);
    features |= (1 << VIRTIO_NET_F_HOST_TSO6);
    features |= (1 << VIRTIO_NET_F_HOST_ECN);

    return features;
}

static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
{
    VirtIONet *n = to_virtio_net(vdev);

    n->mergeable_rx_bufs = !!(features & (1 << VIRTIO_NET_F_MRG_RXBUF));

    if (n->has_vnet_hdr) {
        tap_set_offload(n->nic->nc.peer,
                        (features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                        (features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                        (features >> VIRTIO_NET_F_GUEST_ECN) & 1,
                        (features >> VIRTIO_NET_F_GUEST_UFO) & 1);
    }
    if (!n->nic->nc.peer ||
        n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
        return;
    }
    if (!tap_get_vhost_net(n->nic->nc.peer)) {
        return;
    }
    vhost_net_ack_features(tap_get_vhost_net(n->nic->nc.peer), features);
}

static int virtio_net_handle_rx_mode(VirtIONet *n, uint8_t cmd,
                                     VirtQueueElement *elem)
{
    uint8_t on;

    if (elem->out_num != 2 || elem->out_sg[1].iov_len != sizeof(on)) {
        error_report("virtio-net ctrl invalid rx mode command");
        exit(1);
    }

    on = ldub_p(elem->out_sg[1].iov_base);

    if (cmd == VIRTIO_NET_CTRL_RX_MODE_PROMISC)
        n->promisc = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_ALLMULTI)
        n->allmulti = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_ALLUNI)
        n->alluni = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_NOMULTI)
        n->nomulti = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_NOUNI)
        n->nouni = on;
    else if (cmd == VIRTIO_NET_CTRL_RX_MODE_NOBCAST)
        n->nobcast = on;
    else
        return VIRTIO_NET_ERR;

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_mac(VirtIONet *n, uint8_t cmd,
                                 VirtQueueElement *elem)
{
    struct virtio_net_ctrl_mac mac_data;

    if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET || elem->out_num != 3 ||
        elem->out_sg[1].iov_len < sizeof(mac_data) ||
        elem->out_sg[2].iov_len < sizeof(mac_data))
        return VIRTIO_NET_ERR;

    n->mac_table.in_use = 0;
    n->mac_table.first_multi = 0;
    n->mac_table.uni_overflow = 0;
    n->mac_table.multi_overflow = 0;
    memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN);

    mac_data.entries = ldl_le_p(elem->out_sg[1].iov_base);

    if (sizeof(mac_data.entries) +
        (mac_data.entries * ETH_ALEN) > elem->out_sg[1].iov_len)
        return VIRTIO_NET_ERR;

    if (mac_data.entries <= MAC_TABLE_ENTRIES) {
        memcpy(n->mac_table.macs, elem->out_sg[1].iov_base + sizeof(mac_data),
               mac_data.entries * ETH_ALEN);
        n->mac_table.in_use += mac_data.entries;
    } else {
        n->mac_table.uni_overflow = 1;
    }

    n->mac_table.first_multi = n->mac_table.in_use;

    mac_data.entries = ldl_le_p(elem->out_sg[2].iov_base);

    if (sizeof(mac_data.entries) +
        (mac_data.entries * ETH_ALEN) > elem->out_sg[2].iov_len)
        return VIRTIO_NET_ERR;

    if (mac_data.entries) {
        if (n->mac_table.in_use + mac_data.entries <= MAC_TABLE_ENTRIES) {
            memcpy(n->mac_table.macs + (n->mac_table.in_use * ETH_ALEN),
                   elem->out_sg[2].iov_base + sizeof(mac_data),
                   mac_data.entries * ETH_ALEN);
            n->mac_table.in_use += mac_data.entries;
        } else {
            n->mac_table.multi_overflow = 1;
        }
    }

    return VIRTIO_NET_OK;
}

static int virtio_net_handle_vlan_table(VirtIONet *n, uint8_t cmd,
                                        VirtQueueElement *elem)
{
    uint16_t vid;

    if (elem->out_num != 2 || elem->out_sg[1].iov_len != sizeof(vid)) {
        error_report("virtio-net ctrl invalid vlan command");
        return VIRTIO_NET_ERR;
    }

    vid = lduw_le_p(elem->out_sg[1].iov_base);

    if (vid >= MAX_VLAN)
        return VIRTIO_NET_ERR;

    if (cmd == VIRTIO_NET_CTRL_VLAN_ADD)
        n->vlans[vid >> 5] |= (1U << (vid & 0x1f));
    else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL)
        n->vlans[vid >> 5] &= ~(1U << (vid & 0x1f));
    else
        return VIRTIO_NET_ERR;

    return VIRTIO_NET_OK;
}

static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);
    struct virtio_net_ctrl_hdr ctrl;
    virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
    VirtQueueElement elem;

    while (virtqueue_pop(vq, &elem)) {
        if ((elem.in_num < 1) || (elem.out_num < 1)) {
            error_report("virtio-net ctrl missing headers");
            exit(1);
        }

        if (elem.out_sg[0].iov_len < sizeof(ctrl) ||
            elem.in_sg[elem.in_num - 1].iov_len < sizeof(status)) {
            error_report("virtio-net ctrl header not in correct element");
            exit(1);
        }

        ctrl.class = ldub_p(elem.out_sg[0].iov_base);
        ctrl.cmd = ldub_p(elem.out_sg[0].iov_base + sizeof(ctrl.class));

        if (ctrl.class == VIRTIO_NET_CTRL_RX_MODE)
            status = virtio_net_handle_rx_mode(n, ctrl.cmd, &elem);
        else if (ctrl.class == VIRTIO_NET_CTRL_MAC)
            status = virtio_net_handle_mac(n, ctrl.cmd, &elem);
        else if (ctrl.class == VIRTIO_NET_CTRL_VLAN)
            status = virtio_net_handle_vlan_table(n, ctrl.cmd, &elem);

        stb_p(elem.in_sg[elem.in_num - 1].iov_base, status);

        virtqueue_push(vq, &elem, sizeof(status));
        virtio_notify(vdev, vq);
    }
}

static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);

    qemu_flush_queued_packets(&n->nic->nc);

    /* We now have RX buffers, signal to the IO thread to break out of the
     * select to re-poll the tap file descriptor */
    qemu_notify_event();
}

static int virtio_net_can_receive(VLANClientState *nc)
{
    VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
    if (!n->vm_running) {
        return 0;
    }

    if (!virtio_queue_ready(n->rx_vq) ||
        !(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return 0;

    return 1;
}

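/* Returns nonzero when the RX virtqueue can accept a buffer of bufsize
 * bytes.  On the "no room" path guest notifications are re-enabled (with a
 * recheck to close the race) so we hear about new buffers; otherwise they
 * stay suppressed while we can make progress. */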
static int virtio_net_has_buffers(VirtIONet *n, int bufsize)
{
    if (virtio_queue_empty(n->rx_vq) ||
        (n->mergeable_rx_bufs &&
         !virtqueue_avail_bytes(n->rx_vq, bufsize, 0))) {
        virtio_queue_set_notification(n->rx_vq, 1);

        /* To avoid a race condition where the guest has made some buffers
         * available after the above check but before notification was
         * enabled, check for available buffers again.
         */
        if (virtio_queue_empty(n->rx_vq) ||
            (n->mergeable_rx_bufs &&
             !virtqueue_avail_bytes(n->rx_vq, bufsize, 0)))
            return 0;
    }

    virtio_queue_set_notification(n->rx_vq, 0);
    return 1;
}

/* dhclient uses AF_PACKET but doesn't pass auxdata to the kernel so
 * it never finds out that the packets don't have valid checksums.  This
 * causes dhclient to get upset.  Fedora's carried a patch for ages to
 * fix this with Xen but it hasn't appeared in an upstream release of
 * dhclient yet.
 *
 * To avoid breaking existing guests, we catch udp packets and add
 * checksums.  This is terrible but it's better than hacking the guest
 * kernels.
 *
 * N.B. if we introduce a zero-copy API, this operation is no longer free so
 * we should provide a mechanism to disable it to avoid polluting the host
 * cache.
 */
static void work_around_broken_dhclient(struct virtio_net_hdr *hdr,
                                        const uint8_t *buf, size_t size)
{
    if ((hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && /* missing csum */
        (size > 27 && size < 1500) && /* normal sized MTU */
        (buf[12] == 0x08 && buf[13] == 0x00) && /* ethertype == IPv4 */
        (buf[23] == 17) && /* ip.protocol == UDP */
        (buf[34] == 0 && buf[35] == 67)) { /* udp.srcport == bootps */
        /* FIXME this cast is evil */
        net_checksum_calculate((uint8_t *)buf, size);
        hdr->flags &= ~VIRTIO_NET_HDR_F_NEEDS_CSUM;
    }
}

static int receive_header(VirtIONet *n, struct iovec *iov, int iovcnt,
                          const void *buf, size_t size, size_t hdr_len)
{
    struct virtio_net_hdr *hdr = (struct virtio_net_hdr *)iov[0].iov_base;
    int offset = 0;

    hdr->flags = 0;
    hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;

    if (n->has_vnet_hdr) {
        memcpy(hdr, buf, sizeof(*hdr));
        offset = sizeof(*hdr);
        work_around_broken_dhclient(hdr, buf + offset, size - offset);
    }

    /* We only ever receive a struct virtio_net_hdr from the tapfd,
     * but we may be passing along a larger header to the guest.
     */
    iov[0].iov_base += hdr_len;
    iov[0].iov_len -= hdr_len;

    return offset;
}

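/* Decide whether an incoming frame should be delivered to the guest:
 * returns 1 to accept, 0 to drop, applying the promiscuous/all-multi/
 * all-uni flags, the VLAN bitmap and the unicast/multicast MAC table. */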
static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
{
    static const uint8_t bcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
    static const uint8_t vlan[] = {0x81, 0x00};
    uint8_t *ptr = (uint8_t *)buf;
    int i;

    if (n->promisc)
        return 1;

    if (n->has_vnet_hdr) {
        ptr += sizeof(struct virtio_net_hdr);
    }

    if (!memcmp(&ptr[12], vlan, sizeof(vlan))) {
        int vid = be16_to_cpup((uint16_t *)(ptr + 14)) & 0xfff;
        if (!(n->vlans[vid >> 5] & (1U << (vid & 0x1f))))
            return 0;
    }

    if (ptr[0] & 1) { // multicast
        if (!memcmp(ptr, bcast, sizeof(bcast))) {
            return !n->nobcast;
        } else if (n->nomulti) {
            return 0;
        } else if (n->allmulti || n->mac_table.multi_overflow) {
            return 1;
        }

        for (i = n->mac_table.first_multi; i < n->mac_table.in_use; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    } else { // unicast
        if (n->nouni) {
            return 0;
        } else if (n->alluni || n->mac_table.uni_overflow) {
            return 1;
        } else if (!memcmp(ptr, n->mac, ETH_ALEN)) {
            return 1;
        }

        for (i = 0; i < n->mac_table.first_multi; i++) {
            if (!memcmp(ptr, &n->mac_table.macs[i * ETH_ALEN], ETH_ALEN)) {
                return 1;
            }
        }
    }

    return 0;
}

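/* Copy one frame from the backend into however many RX descriptors it
 * takes, prepending the virtio_net_hdr (or mrg_rxbuf header) expected by
 * the guest, then push the used buffers and notify. */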
static ssize_t virtio_net_receive(VLANClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
    struct virtio_net_hdr_mrg_rxbuf *mhdr = NULL;
    size_t guest_hdr_len, offset, i, host_hdr_len;

    if (!virtio_net_can_receive(&n->nic->nc))
        return -1;

    /* hdr_len refers to the header we supply to the guest */
    guest_hdr_len = n->mergeable_rx_bufs ?
        sizeof(struct virtio_net_hdr_mrg_rxbuf) : sizeof(struct virtio_net_hdr);

    host_hdr_len = n->has_vnet_hdr ? sizeof(struct virtio_net_hdr) : 0;
    if (!virtio_net_has_buffers(n, size + guest_hdr_len - host_hdr_len))
        return 0;

    if (!receive_filter(n, buf, size))
        return size;

    offset = i = 0;

    while (offset < size) {
        VirtQueueElement elem;
        int len, total;
        struct iovec sg[VIRTQUEUE_MAX_SIZE];

        total = 0;

        if (virtqueue_pop(n->rx_vq, &elem) == 0) {
            if (i == 0)
                return -1;
            error_report("virtio-net unexpected empty queue: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd guest features 0x%x",
                         i, n->mergeable_rx_bufs, offset, size,
                         guest_hdr_len, host_hdr_len, n->vdev.guest_features);
            exit(1);
        }

        if (elem.in_num < 1) {
            error_report("virtio-net receive queue contains no in buffers");
            exit(1);
        }

        if (!n->mergeable_rx_bufs && elem.in_sg[0].iov_len != guest_hdr_len) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        memcpy(&sg, &elem.in_sg[0], sizeof(sg[0]) * elem.in_num);

        if (i == 0) {
            if (n->mergeable_rx_bufs)
                mhdr = (struct virtio_net_hdr_mrg_rxbuf *)sg[0].iov_base;

            offset += receive_header(n, sg, elem.in_num,
                                     buf + offset, size - offset, guest_hdr_len);
            total += guest_hdr_len;
        }

        /* copy in packet.  ugh */
        len = iov_from_buf(sg, elem.in_num,
                           buf + offset, size - offset);
        total += len;
        offset += len;

        /* If buffers can't be merged, at this point we
         * must have consumed the complete packet.
         * Otherwise, drop it. */
        if (!n->mergeable_rx_bufs && offset < size) {
            error_report("virtio-net truncated non-mergeable packet: "
                         "i %zd mergeable %d offset %zd, size %zd, "
                         "guest hdr len %zd, host hdr len %zd",
                         i, n->mergeable_rx_bufs,
                         offset, size, guest_hdr_len, host_hdr_len);
            return size;
        }

        /* signal other side */
        virtqueue_fill(n->rx_vq, &elem, total, i++);
    }

    if (mhdr)
        mhdr->num_buffers = i;

    virtqueue_flush(n->rx_vq, i);
    virtio_notify(&n->vdev, n->rx_vq);

    return size;
}

static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq);

static void virtio_net_tx_complete(VLANClientState *nc, ssize_t len)
{
    VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;

    virtqueue_push(n->tx_vq, &n->async_tx.elem, n->async_tx.len);
    virtio_notify(&n->vdev, n->tx_vq);

    n->async_tx.elem.out_num = n->async_tx.len = 0;

    virtio_queue_set_notification(n->tx_vq, 1);
    virtio_net_flush_tx(n, n->tx_vq);
}

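/* Drain up to tx_burst packets from the TX virtqueue.  Returns the number
 * of packets sent, or -EBUSY if an asynchronous send is still in flight
 * (completion re-enables notification via virtio_net_tx_complete). */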
static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq)
{
    VirtQueueElement elem;
    int32_t num_packets = 0;
    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return num_packets;
    }

    assert(n->vm_running);

    if (n->async_tx.elem.out_num) {
        virtio_queue_set_notification(n->tx_vq, 0);
        return num_packets;
    }

    while (virtqueue_pop(vq, &elem)) {
        ssize_t ret, len = 0;
        unsigned int out_num = elem.out_num;
        struct iovec *out_sg = &elem.out_sg[0];
        unsigned hdr_len;

        /* hdr_len refers to the header received from the guest */
        hdr_len = n->mergeable_rx_bufs ?
            sizeof(struct virtio_net_hdr_mrg_rxbuf) :
            sizeof(struct virtio_net_hdr);

        if (out_num < 1 || out_sg->iov_len != hdr_len) {
            error_report("virtio-net header not in first element");
            exit(1);
        }

        /* ignore the header if GSO is not supported */
        if (!n->has_vnet_hdr) {
            out_num--;
            out_sg++;
            len += hdr_len;
        } else if (n->mergeable_rx_bufs) {
            /* tapfd expects a struct virtio_net_hdr */
            hdr_len -= sizeof(struct virtio_net_hdr);
            out_sg->iov_len -= hdr_len;
            len += hdr_len;
        }

        ret = qemu_sendv_packet_async(&n->nic->nc, out_sg, out_num,
                                      virtio_net_tx_complete);
        if (ret == 0) {
            virtio_queue_set_notification(n->tx_vq, 0);
            n->async_tx.elem = elem;
            n->async_tx.len = len;
            return -EBUSY;
        }

        len += ret;

        virtqueue_push(vq, &elem, len);
        virtio_notify(&n->vdev, vq);

        if (++num_packets >= n->tx_burst) {
            break;
        }
    }
    return num_packets;
}

*vdev
, VirtQueue
*vq
)
769 VirtIONet
*n
= to_virtio_net(vdev
);
771 /* This happens when device was stopped but VCPU wasn't. */
772 if (!n
->vm_running
) {
778 virtio_queue_set_notification(vq
, 1);
779 qemu_del_timer(n
->tx_timer
);
781 virtio_net_flush_tx(n
, vq
);
783 qemu_mod_timer(n
->tx_timer
,
784 qemu_get_clock(vm_clock
) + n
->tx_timeout
);
786 virtio_queue_set_notification(vq
, 0);
static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIONet *n = to_virtio_net(vdev);

    if (unlikely(n->tx_waiting)) {
        return;
    }
    n->tx_waiting = 1;
    /* This happens when device was stopped but VCPU wasn't. */
    if (!n->vm_running) {
        return;
    }
    virtio_queue_set_notification(vq, 0);
    qemu_bh_schedule(n->tx_bh);
}

static void virtio_net_tx_timer(void *opaque)
{
    VirtIONet *n = opaque;
    assert(n->vm_running);

    n->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK))
        return;

    virtio_queue_set_notification(n->tx_vq, 1);
    virtio_net_flush_tx(n, n->tx_vq);
}

static void virtio_net_tx_bh(void *opaque)
{
    VirtIONet *n = opaque;
    int32_t ret;

    assert(n->vm_running);

    n->tx_waiting = 0;

    /* Just in case the driver is not ready any more */
    if (unlikely(!(n->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)))
        return;

    ret = virtio_net_flush_tx(n, n->tx_vq);
    if (ret == -EBUSY) {
        return; /* Notification re-enable handled by tx_complete */
    }

    /* If we flush a full burst of packets, assume there are
     * more coming and immediately reschedule */
    if (ret >= n->tx_burst) {
        qemu_bh_schedule(n->tx_bh);
        n->tx_waiting = 1;
        return;
    }

    /* If less than a full burst, re-enable notification and flush
     * anything that may have come in while we weren't looking.  If
     * we find something, assume the guest is still active and reschedule */
    virtio_queue_set_notification(n->tx_vq, 1);
    if (virtio_net_flush_tx(n, n->tx_vq) > 0) {
        virtio_queue_set_notification(n->tx_vq, 0);
        qemu_bh_schedule(n->tx_bh);
        n->tx_waiting = 1;
    }
}

static void virtio_net_save(QEMUFile *f, void *opaque)
{
    VirtIONet *n = opaque;

    /* At this point, backend must be stopped, otherwise
     * it might keep writing to memory. */
    assert(!n->vhost_started);
    virtio_save(&n->vdev, f);

    qemu_put_buffer(f, n->mac, ETH_ALEN);
    qemu_put_be32(f, n->tx_waiting);
    qemu_put_be32(f, n->mergeable_rx_bufs);
    qemu_put_be16(f, n->status);
    qemu_put_byte(f, n->promisc);
    qemu_put_byte(f, n->allmulti);
    qemu_put_be32(f, n->mac_table.in_use);
    qemu_put_buffer(f, n->mac_table.macs, n->mac_table.in_use * ETH_ALEN);
    qemu_put_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);
    qemu_put_be32(f, n->has_vnet_hdr);
    qemu_put_byte(f, n->mac_table.multi_overflow);
    qemu_put_byte(f, n->mac_table.uni_overflow);
    qemu_put_byte(f, n->alluni);
    qemu_put_byte(f, n->nomulti);
    qemu_put_byte(f, n->nouni);
    qemu_put_byte(f, n->nobcast);
    qemu_put_byte(f, n->has_ufo);
}

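/* Restore state written by virtio_net_save().  Fields added over time are
 * guarded by version_id checks, up to VIRTIO_NET_VM_VERSION, so older
 * savevm images remain loadable. */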
static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIONet *n = opaque;
    int i;

    if (version_id < 2 || version_id > VIRTIO_NET_VM_VERSION)
        return -EINVAL;

    virtio_load(&n->vdev, f);

    qemu_get_buffer(f, n->mac, ETH_ALEN);
    n->tx_waiting = qemu_get_be32(f);
    n->mergeable_rx_bufs = qemu_get_be32(f);

    if (version_id >= 3)
        n->status = qemu_get_be16(f);

    if (version_id >= 4) {
        if (version_id < 8) {
            n->promisc = qemu_get_be32(f);
            n->allmulti = qemu_get_be32(f);
        } else {
            n->promisc = qemu_get_byte(f);
            n->allmulti = qemu_get_byte(f);
        }
    }

    if (version_id >= 5) {
        n->mac_table.in_use = qemu_get_be32(f);
        /* MAC_TABLE_ENTRIES may be different from the saved image */
        if (n->mac_table.in_use <= MAC_TABLE_ENTRIES) {
            qemu_get_buffer(f, n->mac_table.macs,
                            n->mac_table.in_use * ETH_ALEN);
        } else if (n->mac_table.in_use) {
            qemu_fseek(f, n->mac_table.in_use * ETH_ALEN, SEEK_CUR);
            n->mac_table.multi_overflow = n->mac_table.uni_overflow = 1;
            n->mac_table.in_use = 0;
        }
    }

    if (version_id >= 6)
        qemu_get_buffer(f, (uint8_t *)n->vlans, MAX_VLAN >> 3);

    if (version_id >= 7) {
        if (qemu_get_be32(f) && !peer_has_vnet_hdr(n)) {
            error_report("virtio-net: saved image requires vnet_hdr=on");
            return -1;
        }

        if (n->has_vnet_hdr) {
            tap_using_vnet_hdr(n->nic->nc.peer, 1);
            tap_set_offload(n->nic->nc.peer,
                (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_CSUM) & 1,
                (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_TSO4) & 1,
                (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_TSO6) & 1,
                (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_ECN)  & 1,
                (n->vdev.guest_features >> VIRTIO_NET_F_GUEST_UFO)  & 1);
        }
    }

    if (version_id >= 9) {
        n->mac_table.multi_overflow = qemu_get_byte(f);
        n->mac_table.uni_overflow = qemu_get_byte(f);
    }

    if (version_id >= 10) {
        n->alluni = qemu_get_byte(f);
        n->nomulti = qemu_get_byte(f);
        n->nouni = qemu_get_byte(f);
        n->nobcast = qemu_get_byte(f);
    }

    if (version_id >= 11) {
        if (qemu_get_byte(f) && !peer_has_ufo(n)) {
            error_report("virtio-net: saved image requires TUN_F_UFO support");
            return -1;
        }
    }

    /* Find the first multicast entry in the saved MAC filter */
    for (i = 0; i < n->mac_table.in_use; i++) {
        if (n->mac_table.macs[i * ETH_ALEN] & 1) {
            break;
        }
    }
    n->mac_table.first_multi = i;
    return 0;
}

static void virtio_net_cleanup(VLANClientState *nc)
{
    VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;

    n->nic = NULL;
}

= {
983 .type
= NET_CLIENT_TYPE_NIC
,
984 .size
= sizeof(NICState
),
985 .can_receive
= virtio_net_can_receive
,
986 .receive
= virtio_net_receive
,
987 .cleanup
= virtio_net_cleanup
,
988 .link_status_changed
= virtio_net_set_link_status
,
991 static void virtio_net_vmstate_change(void *opaque
, int running
, int reason
)
993 VirtIONet
*n
= opaque
;
994 n
->vm_running
= running
;
995 /* This is called when vm is started/stopped,
996 * it will start/stop vhost backend if appropriate
997 * e.g. after migration. */
998 virtio_net_set_status(&n
->vdev
, n
->vdev
.status
);
VirtIODevice *virtio_net_init(DeviceState *dev, NICConf *conf,
                              virtio_net_conf *net)
{
    VirtIONet *n;

    n = (VirtIONet *)virtio_common_init("virtio-net", VIRTIO_ID_NET,
                                        sizeof(struct virtio_net_config),
                                        sizeof(VirtIONet));

    n->vdev.get_config = virtio_net_get_config;
    n->vdev.set_config = virtio_net_set_config;
    n->vdev.get_features = virtio_net_get_features;
    n->vdev.set_features = virtio_net_set_features;
    n->vdev.bad_features = virtio_net_bad_features;
    n->vdev.reset = virtio_net_reset;
    n->vdev.set_status = virtio_net_set_status;
    n->rx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_rx);

    if (net->tx && strcmp(net->tx, "timer") && strcmp(net->tx, "bh")) {
        error_report("virtio-net: "
                     "Unknown option tx=%s, valid options: \"timer\" \"bh\"",
                     net->tx);
        error_report("Defaulting to \"bh\"");
    }

    if (net->tx && !strcmp(net->tx, "timer")) {
        n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx_timer);
        n->tx_timer = qemu_new_timer(vm_clock, virtio_net_tx_timer, n);
        n->tx_timeout = net->txtimer;
    } else {
        n->tx_vq = virtio_add_queue(&n->vdev, 256, virtio_net_handle_tx_bh);
        n->tx_bh = qemu_bh_new(virtio_net_tx_bh, n);
    }
    n->ctrl_vq = virtio_add_queue(&n->vdev, 64, virtio_net_handle_ctrl);
    qemu_macaddr_default_if_unset(&conf->macaddr);
    memcpy(&n->mac[0], &conf->macaddr, sizeof(n->mac));
    n->status = VIRTIO_NET_S_LINK_UP;

    n->nic = qemu_new_nic(&net_virtio_info, conf, dev->info->name, dev->id, n);

    qemu_format_nic_info_str(&n->nic->nc, conf->macaddr.a);

    n->tx_waiting = 0;
    n->tx_burst = net->txburst;
    n->mergeable_rx_bufs = 0;
    n->promisc = 1; /* for compatibility */

    n->mac_table.macs = qemu_mallocz(MAC_TABLE_ENTRIES * ETH_ALEN);

    n->vlans = qemu_mallocz(MAX_VLAN >> 3);

    n->qdev = dev;
    register_savevm(dev, "virtio-net", -1, VIRTIO_NET_VM_VERSION,
                    virtio_net_save, virtio_net_load, n);
    n->vmstate = qemu_add_vm_change_state_handler(virtio_net_vmstate_change, n);

    return &n->vdev;
}

void virtio_net_exit(VirtIODevice *vdev)
{
    VirtIONet *n = DO_UPCAST(VirtIONet, vdev, vdev);
    qemu_del_vm_change_state_handler(n->vmstate);

    /* This will stop vhost backend if appropriate. */
    virtio_net_set_status(vdev, 0);

    qemu_purge_queued_packets(&n->nic->nc);

    unregister_savevm(n->qdev, "virtio-net", n);

    qemu_free(n->mac_table.macs);
    qemu_free(n->vlans);

    if (n->tx_timer) {
        qemu_del_timer(n->tx_timer);
        qemu_free_timer(n->tx_timer);
    } else {
        qemu_bh_delete(n->tx_bh);
    }

    virtio_cleanup(&n->vdev);
    qemu_del_vlan_client(&n->nic->nc);
}