/*-
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Driver for VirtIO network devices. */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/random.h>
#include <sys/sglist.h>
#include <sys/serialize.h>
#include <sys/bus.h>

#include <machine/limits.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/vlan/if_vlan_var.h>
#include <net/vlan/if_vlan_ether.h>
#include <net/ifq_var.h>
#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <dev/virtual/virtio/virtio/virtio.h>
#include <dev/virtual/virtio/virtio/virtqueue.h>
#include <dev/virtual/virtio/net/virtio_net.h>
#include <dev/virtual/virtio/net/if_vtnetvar.h>

#include "virtio_if.h"

MALLOC_DEFINE(M_VTNET, "VTNET_TX", "Outgoing VTNET TX frame header");

static int	vtnet_probe(device_t);
static int	vtnet_attach(device_t);
static int	vtnet_detach(device_t);
static int	vtnet_suspend(device_t);
static int	vtnet_resume(device_t);
static int	vtnet_shutdown(device_t);
static int	vtnet_config_change(device_t);

static void	vtnet_negotiate_features(struct vtnet_softc *);
static int	vtnet_alloc_virtqueues(struct vtnet_softc *);
static void	vtnet_get_hwaddr(struct vtnet_softc *);
static void	vtnet_set_hwaddr(struct vtnet_softc *);
static int	vtnet_is_link_up(struct vtnet_softc *);
static void	vtnet_update_link_status(struct vtnet_softc *);
static void	vtnet_watchdog(struct vtnet_softc *);
static void	vtnet_config_change_task(void *, int);
static int	vtnet_setup_interface(struct vtnet_softc *);
static int	vtnet_change_mtu(struct vtnet_softc *, int);
static int	vtnet_ioctl(struct ifnet *, u_long, caddr_t, struct ucred *);

static int	vtnet_init_rx_vq(struct vtnet_softc *);
static void	vtnet_free_rx_mbufs(struct vtnet_softc *);
static void	vtnet_free_tx_mbufs(struct vtnet_softc *);
static void	vtnet_free_ctrl_vq(struct vtnet_softc *);

static struct mbuf * vtnet_alloc_rxbuf(struct vtnet_softc *, int,
		    struct mbuf **);
static int	vtnet_replace_rxbuf(struct vtnet_softc *,
		    struct mbuf *, int);
static int	vtnet_newbuf(struct vtnet_softc *);
static void	vtnet_discard_merged_rxbuf(struct vtnet_softc *, int);
static void	vtnet_discard_rxbuf(struct vtnet_softc *, struct mbuf *);
static int	vtnet_enqueue_rxbuf(struct vtnet_softc *, struct mbuf *);
static void	vtnet_vlan_tag_remove(struct mbuf *);
static int	vtnet_rx_csum(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_rxeof_merged(struct vtnet_softc *, struct mbuf *, int);
static int	vtnet_rxeof(struct vtnet_softc *, int, int *);
static void	vtnet_rx_intr_task(void *);
static int	vtnet_rx_vq_intr(void *);

static void	vtnet_enqueue_txhdr(struct vtnet_softc *,
		    struct vtnet_tx_header *);
static void	vtnet_txeof(struct vtnet_softc *);
static struct mbuf * vtnet_tx_offload(struct vtnet_softc *, struct mbuf *,
		    struct virtio_net_hdr *);
static int	vtnet_enqueue_txbuf(struct vtnet_softc *, struct mbuf **,
		    struct vtnet_tx_header *);
static int	vtnet_encap(struct vtnet_softc *, struct mbuf **);
static void	vtnet_start_locked(struct ifnet *, struct ifaltq_subque *);
static void	vtnet_start(struct ifnet *, struct ifaltq_subque *);
static void	vtnet_tick(void *);
static void	vtnet_tx_intr_task(void *);
static int	vtnet_tx_vq_intr(void *);

static void	vtnet_stop(struct vtnet_softc *);
static int	vtnet_virtio_reinit(struct vtnet_softc *);
static void	vtnet_init_locked(struct vtnet_softc *);
static void	vtnet_init(void *);

static void	vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *,
		    struct sglist *, int, int);
static int	vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *);
static int	vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int);
static int	vtnet_set_promisc(struct vtnet_softc *, int);
static int	vtnet_set_allmulti(struct vtnet_softc *, int);
static void	vtnet_rx_filter(struct vtnet_softc *sc);
static void	vtnet_rx_filter_mac(struct vtnet_softc *);

static int	vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_rx_filter_vlan(struct vtnet_softc *);
static void	vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t);
static void	vtnet_register_vlan(void *, struct ifnet *, uint16_t);
static void	vtnet_unregister_vlan(void *, struct ifnet *, uint16_t);

static int	vtnet_ifmedia_upd(struct ifnet *);
static void	vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *);

static void	vtnet_add_statistics(struct vtnet_softc *);

static int	vtnet_enable_rx_intr(struct vtnet_softc *);
static int	vtnet_enable_tx_intr(struct vtnet_softc *);
static void	vtnet_disable_rx_intr(struct vtnet_softc *);
static void	vtnet_disable_tx_intr(struct vtnet_softc *);

static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
static int vtnet_tso_disable = 1;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
static int vtnet_lro_disable = 0;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);

/*
 * Reducing the number of transmit completed interrupts can
 * improve performance. To do so, the define below keeps the
 * Tx vq interrupt disabled and adds calls to vtnet_txeof()
 * in the start and watchdog paths. The price to pay for this
 * is the m_free'ing of transmitted mbufs may be delayed until
 * the watchdog fires.
 */
#define VTNET_TX_INTR_MODERATION

static struct virtio_feature_desc vtnet_feature_desc[] = {
	{ VIRTIO_NET_F_CSUM,		"TxChecksum"	},
	{ VIRTIO_NET_F_GUEST_CSUM,	"RxChecksum"	},
	{ VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, "DynOffload"	},
	{ VIRTIO_NET_F_MAC,		"MacAddress"	},
	{ VIRTIO_NET_F_GSO,		"TxAllGSO"	},
	{ VIRTIO_NET_F_GUEST_TSO4,	"RxTSOv4"	},
	{ VIRTIO_NET_F_GUEST_TSO6,	"RxTSOv6"	},
	{ VIRTIO_NET_F_GUEST_ECN,	"RxECN"		},
	{ VIRTIO_NET_F_GUEST_UFO,	"RxUFO"		},
	{ VIRTIO_NET_F_HOST_TSO4,	"TxTSOv4"	},
	{ VIRTIO_NET_F_HOST_TSO6,	"TxTSOv6"	},
	{ VIRTIO_NET_F_HOST_ECN,	"TxTSOECN"	},
	{ VIRTIO_NET_F_HOST_UFO,	"TxUFO"		},
	{ VIRTIO_NET_F_MRG_RXBUF,	"MrgRxBuf"	},
	{ VIRTIO_NET_F_STATUS,		"Status"	},
	{ VIRTIO_NET_F_CTRL_VQ,		"ControlVq"	},
	{ VIRTIO_NET_F_CTRL_RX,		"RxMode"	},
	{ VIRTIO_NET_F_CTRL_VLAN,	"VLanFilter"	},
	{ VIRTIO_NET_F_CTRL_RX_EXTRA,	"RxModeExtra"	},
	{ VIRTIO_NET_F_GUEST_ANNOUNCE,	"GuestAnnounce"	},
	{ VIRTIO_NET_F_MQ,		"RFS"		},
	{ VIRTIO_NET_F_CTRL_MAC_ADDR,	"SetMacAddress"	},

	{ 0, NULL }
};

static device_method_t vtnet_methods[] = {
	/* Device methods. */
	DEVMETHOD(device_probe,		vtnet_probe),
	DEVMETHOD(device_attach,	vtnet_attach),
	DEVMETHOD(device_detach,	vtnet_detach),
	DEVMETHOD(device_suspend,	vtnet_suspend),
	DEVMETHOD(device_resume,	vtnet_resume),
	DEVMETHOD(device_shutdown,	vtnet_shutdown),

	/* VirtIO methods. */
	DEVMETHOD(virtio_config_change, vtnet_config_change),

	DEVMETHOD_END
};

static driver_t vtnet_driver = {
	"vtnet",
	vtnet_methods,
	sizeof(struct vtnet_softc)
};

static devclass_t vtnet_devclass;

DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass, NULL, NULL);
MODULE_VERSION(vtnet, 1);
MODULE_DEPEND(vtnet, virtio, 1, 1, 1);

static int
vtnet_probe(device_t dev)
{
	if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK)
		return (ENXIO);

	device_set_desc(dev, "VirtIO Networking Adapter");

	return (BUS_PROBE_DEFAULT);
}

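/*
 * Attach: negotiate features with the host, allocate the virtqueues and
 * Tx header area, create the ifnet, and wire up virtqueue interrupts.
 */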
static int
vtnet_attach(device_t dev)
{
	struct vtnet_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vtnet_dev = dev;

	lwkt_serialize_init(&sc->vtnet_slz);
	callout_init(&sc->vtnet_tick_ch);

	ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd,
	    vtnet_ifmedia_sts);
	ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL);
	ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE);

	vtnet_add_statistics(sc);
	SLIST_INIT(&sc->vtnet_txhdr_free);

	/* Register our feature descriptions. */
	virtio_set_feature_desc(dev, vtnet_feature_desc);
	vtnet_negotiate_features(sc);

	if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
		sc->vtnet_flags |= VTNET_FLAG_INDIRECT;

	if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) {
		/* This feature should always be negotiated. */
		sc->vtnet_flags |= VTNET_FLAG_MAC;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) {
		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	} else {
		sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr);
	}

	sc->vtnet_rx_mbuf_size = MCLBYTES;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);

	if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) {
		sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ;

		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_RX;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN))
			sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER;
		if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR) &&
		    virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX))
			sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC;
	}

	/* Read (or generate) the MAC address for the adapter. */
	vtnet_get_hwaddr(sc);

	error = vtnet_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = vtnet_setup_interface(sc);
	if (error) {
		device_printf(dev, "cannot setup interface\n");
		goto fail;
	}

	TASK_INIT(&sc->vtnet_cfgchg_task, 0, vtnet_config_change_task, sc);

	error = virtio_setup_intr(dev, &sc->vtnet_slz);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		ether_ifdetach(sc->vtnet_ifp);
		goto fail;
	}

	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		vtnet_set_hwaddr(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);
	}

	/*
	 * Device defaults to promiscuous mode for backwards
	 * compatibility. Turn it off if possible.
	 */
	if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		if (vtnet_set_promisc(sc, 0) != 0) {
			sc->vtnet_ifp->if_flags |= IFF_PROMISC;
			device_printf(dev,
			    "cannot disable promiscuous mode\n");
		}
		lwkt_serialize_exit(&sc->vtnet_slz);
	} else {
		sc->vtnet_ifp->if_flags |= IFF_PROMISC;
	}

fail:
	if (error)
		vtnet_detach(dev);

	return (error);
}

static int
vtnet_detach(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(&sc->vtnet_slz);
		vtnet_stop(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);

		callout_stop(&sc->vtnet_tick_ch);
		taskqueue_drain(taskqueue_swi, &sc->vtnet_cfgchg_task);

		ether_ifdetach(ifp);
	}

	if (sc->vtnet_vlan_attach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach);
		sc->vtnet_vlan_attach = NULL;
	}
	if (sc->vtnet_vlan_detach != NULL) {
		EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach);
		sc->vtnet_vlan_detach = NULL;
	}

	if (ifp != NULL) {
		if_free(ifp);
		sc->vtnet_ifp = NULL;
	}

	if (sc->vtnet_rx_vq != NULL)
		vtnet_free_rx_mbufs(sc);
	if (sc->vtnet_tx_vq != NULL)
		vtnet_free_tx_mbufs(sc);
	if (sc->vtnet_ctrl_vq != NULL)
		vtnet_free_ctrl_vq(sc);

	if (sc->vtnet_txhdrarea != NULL) {
		contigfree(sc->vtnet_txhdrarea,
		    sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
		    M_VTNET);
		sc->vtnet_txhdrarea = NULL;
	}
	SLIST_INIT(&sc->vtnet_txhdr_free);
	if (sc->vtnet_macfilter != NULL) {
		contigfree(sc->vtnet_macfilter,
		    sizeof(struct vtnet_mac_filter), M_DEVBUF);
		sc->vtnet_macfilter = NULL;
	}

	ifmedia_removeall(&sc->vtnet_media);

	return (0);
}

static int
vtnet_suspend(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_stop(sc);
	sc->vtnet_flags |= VTNET_FLAG_SUSPENDED;
	lwkt_serialize_exit(&sc->vtnet_slz);

	return (0);
}

static int
vtnet_resume(device_t dev)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;

	sc = device_get_softc(dev);
	ifp = sc->vtnet_ifp;

	lwkt_serialize_enter(&sc->vtnet_slz);
	if (ifp->if_flags & IFF_UP)
		vtnet_init_locked(sc);
	sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED;
	lwkt_serialize_exit(&sc->vtnet_slz);

	return (0);
}

static int
vtnet_shutdown(device_t dev)
{
	/*
	 * Suspend already does all of what we need to
	 * do here; we just never expect to be resumed.
	 */
	return (vtnet_suspend(dev));
}

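/*
 * Config-change interrupts only schedule a task here; the link state is
 * re-read from the device later, in taskqueue context (see
 * vtnet_config_change_task() below).
 */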
static int
vtnet_config_change(device_t dev)
{
	struct vtnet_softc *sc;

	sc = device_get_softc(dev);

	taskqueue_enqueue(taskqueue_thread[mycpuid], &sc->vtnet_cfgchg_task);

	return (1);
}

static void
vtnet_negotiate_features(struct vtnet_softc *sc)
{
	device_t dev;
	uint64_t mask, features;

	dev = sc->vtnet_dev;
	mask = 0;

	if (vtnet_csum_disable)
		mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM;

	/*
	 * XXX DragonFly doesn't support receive checksum offload for ipv6 yet,
	 *     hence always disable the virtio feature for now.
	 * XXX We need to support the DynOffload feature, in order to
	 *     dynamically enable/disable this feature.
	 */
	mask |= VIRTIO_NET_F_GUEST_CSUM;

	/*
	 * TSO and LRO are only available when their corresponding checksum
	 * offload feature is also negotiated.
	 */
	if (vtnet_csum_disable || vtnet_tso_disable)
		mask |= VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 |
		    VIRTIO_NET_F_HOST_ECN;
	if (vtnet_csum_disable || vtnet_lro_disable)
		mask |= VTNET_LRO_FEATURES;

	features = VTNET_FEATURES & ~mask;
	features |= VIRTIO_F_NOTIFY_ON_EMPTY;
	features |= VIRTIO_F_ANY_LAYOUT;
	sc->vtnet_features = virtio_negotiate_features(dev, features);

	if (virtio_with_feature(dev, VTNET_LRO_FEATURES) &&
	    virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) {
		/*
		 * LRO without mergeable buffers requires special care. This
		 * is not ideal because every receive buffer must be large
		 * enough to hold the maximum TCP packet, the Ethernet header,
		 * and the header. This requires up to 34 descriptors with
		 * MCLBYTES clusters. If we do not have indirect descriptors,
		 * LRO is disabled since the virtqueue will not contain very
		 * many receive buffers.
		 */
		if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) {
			device_printf(dev,
			    "LRO disabled due to both mergeable buffers and "
			    "indirect descriptors not negotiated\n");

			features &= ~VTNET_LRO_FEATURES;
			sc->vtnet_features =
			    virtio_negotiate_features(dev, features);
		} else {
			sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG;
		}
	}
}

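/*
 * The per-queue scatter/gather segment limits are fixed up front:
 * mergeable Rx buffers need only one cluster per descriptor chain,
 * LRO without mergeable buffers needs the full chain, and a
 * TSO-capable Tx queue needs VTNET_MAX_TX_SEGS.
 */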
static int
vtnet_alloc_virtqueues(struct vtnet_softc *sc)
{
	device_t dev;
	struct vq_alloc_info vq_info[3];
	int nvqs;

	dev = sc->vtnet_dev;
	nvqs = 2;

	/*
	 * Indirect descriptors are not needed for the Rx
	 * virtqueue when mergeable buffers are negotiated.
	 * The header is placed inline with the data, not
	 * in a separate descriptor, and mbuf clusters are
	 * always physically contiguous.
	 */
	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		sc->vtnet_rx_nsegs = (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) ?
		    VTNET_MAX_RX_SEGS : VTNET_MIN_RX_SEGS;
	} else {
		sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) ||
	    virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
		sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS;
	else
		sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS;

	VQ_ALLOC_INFO_INIT(&vq_info[0], sc->vtnet_rx_nsegs,
	    vtnet_rx_vq_intr, sc, &sc->vtnet_rx_vq,
	    "%s receive", device_get_nameunit(dev));

	VQ_ALLOC_INFO_INIT(&vq_info[1], sc->vtnet_tx_nsegs,
	    vtnet_tx_vq_intr, sc, &sc->vtnet_tx_vq,
	    "%s transmit", device_get_nameunit(dev));

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		nvqs = 3;

		VQ_ALLOC_INFO_INIT(&vq_info[2], 0, NULL, NULL,
		    &sc->vtnet_ctrl_vq, "%s control",
		    device_get_nameunit(dev));
	}

	return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
}

static int
vtnet_setup_interface(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	int i;

	dev = sc->vtnet_dev;

	ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "cannot allocate ifnet structure\n");
		return (ENOSPC);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = vtnet_init;
	ifp->if_start = vtnet_start;
	ifp->if_ioctl = vtnet_ioctl;

	sc->vtnet_rx_process_limit = virtqueue_size(sc->vtnet_rx_vq);
	sc->vtnet_tx_size = virtqueue_size(sc->vtnet_tx_vq);
	if (sc->vtnet_flags & VTNET_FLAG_INDIRECT)
		sc->vtnet_txhdrcount = sc->vtnet_tx_size;
	else
		sc->vtnet_txhdrcount = (sc->vtnet_tx_size / 2) + 1;
	sc->vtnet_txhdrarea = contigmalloc(
	    sc->vtnet_txhdrcount * sizeof(struct vtnet_tx_header),
	    M_VTNET, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_txhdrarea == NULL) {
		device_printf(dev, "cannot contigmalloc the tx headers\n");
		return (ENOMEM);
	}
	for (i = 0; i < sc->vtnet_txhdrcount; i++)
		vtnet_enqueue_txhdr(sc, &sc->vtnet_txhdrarea[i]);
	sc->vtnet_macfilter = contigmalloc(
	    sizeof(struct vtnet_mac_filter),
	    M_DEVBUF, M_WAITOK, 0, BUS_SPACE_MAXADDR, 4, 0);
	if (sc->vtnet_macfilter == NULL) {
		device_printf(dev,
		    "cannot contigmalloc the mac filter table\n");
		return (ENOMEM);
	}
	ifq_set_maxlen(&ifp->if_snd, sc->vtnet_tx_size - 1);
	ifq_set_ready(&ifp->if_snd);

	ether_ifattach(ifp, sc->vtnet_hwaddr, NULL);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

	if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) {
		ifp->if_capabilities |= IFCAP_TXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4))
			ifp->if_capabilities |= IFCAP_TSO4;
		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6))
			ifp->if_capabilities |= IFCAP_TSO6;
		if (ifp->if_capabilities & IFCAP_TSO)
			ifp->if_capabilities |= IFCAP_VLAN_HWTSO;

		if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN))
			sc->vtnet_flags |= VTNET_FLAG_TSO_ECN;
	}

	if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) {
		ifp->if_capabilities |= IFCAP_RXCSUM;

		if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) ||
		    virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6))
			ifp->if_capabilities |= IFCAP_LRO;
	}

	if (ifp->if_capabilities & IFCAP_HWCSUM) {
		/*
		 * VirtIO does not support VLAN tagging, but we can fake
		 * it by inserting and removing the 802.1Q header during
		 * transmit and receive. We are then able to do checksum
		 * offloading of VLAN frames.
		 */
		ifp->if_capabilities |=
		    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM;
	}

	ifp->if_capenable = ifp->if_capabilities;

	/*
	 * Capabilities after here are not enabled by default.
	 */

	if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) {
		ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;

		sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
		    vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
		sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
		    vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
	}

	return (0);
}

static void
vtnet_set_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if ((sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) &&
	    (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)) {
		if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0)
			device_printf(dev, "unable to set MAC address\n");
	} else if (sc->vtnet_flags & VTNET_FLAG_MAC) {
		virtio_write_device_config(dev,
		    offsetof(struct virtio_net_config, mac),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	}
}

static void
vtnet_get_hwaddr(struct vtnet_softc *sc)
{
	device_t dev;

	dev = sc->vtnet_dev;

	if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) {
		/*
		 * Generate a random locally administered unicast address.
		 *
		 * It would be nice to generate the same MAC address across
		 * reboots, but it seems all the hosts currently available
		 * support the MAC feature, so this isn't too important.
		 */
		sc->vtnet_hwaddr[0] = 0xB2;
		karc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1);
		return;
	}

	virtio_read_device_config(dev,
	    offsetof(struct virtio_net_config, mac),
	    sc->vtnet_hwaddr, ETHER_ADDR_LEN);
}

static int
vtnet_is_link_up(struct vtnet_softc *sc)
{
	device_t dev;
	uint16_t status;

	dev = sc->vtnet_dev;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) {
		status = virtio_read_dev_config_2(dev,
		    offsetof(struct virtio_net_config, status));
	} else {
		status = VIRTIO_NET_S_LINK_UP;
	}

	return ((status & VIRTIO_NET_S_LINK_UP) != 0);
}

static void
vtnet_update_link_status(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	struct ifaltq_subque *ifsq;
	int link;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	ifsq = ifq_get_subq_default(&ifp->if_snd);

	link = vtnet_is_link_up(sc);

	if (link && ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0)) {
		sc->vtnet_flags |= VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is up\n");
		ifp->if_link_state = LINK_STATE_UP;
		if_link_state_change(ifp);
		if (!ifsq_is_empty(ifsq))
			vtnet_start_locked(ifp, ifsq);
	} else if (!link && (sc->vtnet_flags & VTNET_FLAG_LINK)) {
		sc->vtnet_flags &= ~VTNET_FLAG_LINK;
		if (bootverbose)
			device_printf(dev, "Link is down\n");
		ifp->if_link_state = LINK_STATE_DOWN;
		if_link_state_change(ifp);
	}
}

static void
vtnet_watchdog(struct vtnet_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->vtnet_ifp;

#ifdef VTNET_TX_INTR_MODERATION
	vtnet_txeof(sc);
#endif

	if (sc->vtnet_watchdog_timer == 0 || --sc->vtnet_watchdog_timer)
		return;

	if_printf(ifp, "watchdog timeout -- resetting\n");
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_tx_vq);
#endif
	ifp->if_flags &= ~IFF_RUNNING;
	vtnet_init_locked(sc);
}

static void
vtnet_config_change_task(void *arg, int pending)
{
	struct vtnet_softc *sc;

	sc = arg;

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_update_link_status(sc);
	lwkt_serialize_exit(&sc->vtnet_slz);
}

static int
vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data, struct ucred *cr)
{
	struct vtnet_softc *sc;
	struct ifreq *ifr;
	int reinit, mask, error;

	sc = ifp->if_softc;
	ifr = (struct ifreq *) data;
	reinit = 0;
	error = 0;

	switch (cmd) {
	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > VTNET_MAX_MTU)
			error = EINVAL;
		else if (ifp->if_mtu != ifr->ifr_mtu) {
			lwkt_serialize_enter(&sc->vtnet_slz);
			error = vtnet_change_mtu(sc, ifr->ifr_mtu);
			lwkt_serialize_exit(&sc->vtnet_slz);
		}
		break;

	case SIOCSIFFLAGS:
		lwkt_serialize_enter(&sc->vtnet_slz);
		if ((ifp->if_flags & IFF_UP) == 0) {
			if (ifp->if_flags & IFF_RUNNING)
				vtnet_stop(sc);
		} else if (ifp->if_flags & IFF_RUNNING) {
			if ((ifp->if_flags ^ sc->vtnet_if_flags) &
			    (IFF_PROMISC | IFF_ALLMULTI)) {
				if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX)
					vtnet_rx_filter(sc);
				else
					error = ENOTSUP;
			}
		} else {
			vtnet_init_locked(sc);
		}

		sc->vtnet_if_flags = ifp->if_flags;
		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		lwkt_serialize_enter(&sc->vtnet_slz);
		if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) &&
		    (ifp->if_flags & IFF_RUNNING))
			vtnet_rx_filter_mac(sc);
		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd);
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;

		lwkt_serialize_enter(&sc->vtnet_slz);

		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
			else
				ifp->if_hwassist &= ~VTNET_CSUM_OFFLOAD;
		}

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_TSO;
			else
				ifp->if_hwassist &= ~CSUM_TSO;
		}

		if (mask & IFCAP_RXCSUM) {
			ifp->if_capenable ^= IFCAP_RXCSUM;
			reinit = 1;
		}

		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			reinit = 1;
		}

		if (mask & IFCAP_VLAN_HWTSO)
			ifp->if_capenable ^= IFCAP_VLAN_HWTSO;

		if (mask & IFCAP_VLAN_HWTAGGING)
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;

		if (reinit && (ifp->if_flags & IFF_RUNNING)) {
			ifp->if_flags &= ~IFF_RUNNING;
			vtnet_init_locked(sc);
		}
		//VLAN_CAPABILITIES(ifp);

		lwkt_serialize_exit(&sc->vtnet_slz);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}

static int
vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu)
{
	struct ifnet *ifp;
	int new_frame_size, clsize;

	ifp = sc->vtnet_ifp;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		new_frame_size = sizeof(struct vtnet_rx_header) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size > MJUM9BYTES)
			return (EINVAL);

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUM9BYTES;
	} else {
		new_frame_size = sizeof(struct virtio_net_hdr_mrg_rxbuf) +
		    sizeof(struct ether_vlan_header) + new_mtu;

		if (new_frame_size <= MCLBYTES)
			clsize = MCLBYTES;
		else
			clsize = MJUMPAGESIZE;
	}

	sc->vtnet_rx_mbuf_size = clsize;
	sc->vtnet_rx_mbuf_count = VTNET_NEEDED_RX_MBUFS(sc);
	KASSERT(sc->vtnet_rx_mbuf_count < VTNET_MAX_RX_SEGS,
	    ("too many rx mbufs: %d", sc->vtnet_rx_mbuf_count));

	ifp->if_mtu = new_mtu;

	if (ifp->if_flags & IFF_RUNNING) {
		ifp->if_flags &= ~IFF_RUNNING;
		vtnet_init_locked(sc);
	}

	return (0);
}

static int
vtnet_init_rx_vq(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	int nbufs, error;

	vq = sc->vtnet_rx_vq;
	error = ENOSPC;
	nbufs = 0;

	while (!virtqueue_full(vq)) {
		if ((error = vtnet_newbuf(sc)) != 0)
			break;
		nbufs++;
	}

	if (nbufs > 0) {
		virtqueue_notify(vq, &sc->vtnet_slz);

		/*
		 * EMSGSIZE signifies the virtqueue did not have enough
		 * entries available to hold the last mbuf. This is not
		 * an error. We should not get ENOSPC since we check if
		 * the virtqueue is full before attempting to add a
		 * buffer.
		 */
		if (error == EMSGSIZE)
			error = 0;
	}

	return (error);
}

static void
vtnet_free_rx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct mbuf *m;
	int last;

	vq = sc->vtnet_rx_vq;
	last = 0;

	while ((m = virtqueue_drain(vq, &last)) != NULL)
		m_freem(m);

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Rx Vq"));
}

static void
vtnet_free_tx_mbufs(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct vtnet_tx_header *txhdr;
	int last;

	vq = sc->vtnet_tx_vq;
	last = 0;

	while ((txhdr = virtqueue_drain(vq, &last)) != NULL) {
		m_freem(txhdr->vth_mbuf);
		vtnet_enqueue_txhdr(sc, txhdr);
	}

	KASSERT(virtqueue_empty(vq), ("mbufs remaining in Tx Vq"));
}

static void
vtnet_free_ctrl_vq(struct vtnet_softc *sc)
{
	/*
	 * The control virtqueue is only polled, therefore
	 * it should already be empty.
	 */
	KASSERT(virtqueue_empty(sc->vtnet_ctrl_vq), ("Ctrl Vq not empty"));
}

static struct mbuf *
vtnet_alloc_rxbuf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp)
{
	struct mbuf *m_head, *m_tail, *m;
	int i, clsize;

	clsize = sc->vtnet_rx_mbuf_size;

	/* use getcl instead of getjcl. see if_mxge.c comment line 2398 */
	//m_head = m_getjcl(M_DONTWAIT, MT_DATA, M_PKTHDR, clsize);
	m_head = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (m_head == NULL)
		goto fail;

	m_head->m_len = clsize;
	m_tail = m_head;

	if (nbufs > 1) {
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf requested without LRO_NOMRG"));

		for (i = 0; i < nbufs - 1; i++) {
			//m = m_getjcl(M_DONTWAIT, MT_DATA, 0, clsize);
			m = m_getcl(M_NOWAIT, MT_DATA, 0);
			if (m == NULL)
				goto fail;

			m->m_len = clsize;
			m_tail->m_next = m;
			m_tail = m;
		}
	}

	if (m_tailp != NULL)
		*m_tailp = m_tail;

	return (m_head);

fail:
	sc->vtnet_stats.mbuf_alloc_failed++;
	m_freem(m_head);

	return (NULL);
}

static int
vtnet_replace_rxbuf(struct vtnet_softc *sc, struct mbuf *m0, int len0)
{
	struct mbuf *m, *m_prev;
	struct mbuf *m_new, *m_tail;
	int len, clsize, nreplace, error;

	m = m0;
	m_prev = NULL;
	len = len0;

	m_tail = NULL;
	clsize = sc->vtnet_rx_mbuf_size;
	nreplace = 0;

	if (m->m_next != NULL)
		KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG,
		    ("chained Rx mbuf without LRO_NOMRG"));

	/*
	 * Since LRO_NOMRG mbuf chains are so large, we want to avoid
	 * allocating an entire chain for each received frame. When
	 * the received frame's length is less than that of the chain,
	 * the unused mbufs are reassigned to the new chain.
	 */
	while (len > 0) {
		/*
		 * Something is seriously wrong if we received
		 * a frame larger than the mbuf chain. Drop it.
		 */
		if (m == NULL) {
			sc->vtnet_stats.rx_frame_too_large++;
			return (EMSGSIZE);
		}

		KASSERT(m->m_len == clsize,
		    ("mbuf length not expected cluster size: %d",
		    m->m_len));

		m->m_len = MIN(m->m_len, len);
		len -= m->m_len;

		m_prev = m;
		m = m->m_next;
		nreplace++;
	}

	KASSERT(m_prev != NULL, ("m_prev == NULL"));
	KASSERT(nreplace <= sc->vtnet_rx_mbuf_count,
	    ("too many replacement mbufs: %d/%d", nreplace,
	    sc->vtnet_rx_mbuf_count));

	m_new = vtnet_alloc_rxbuf(sc, nreplace, &m_tail);
	if (m_new == NULL) {
		m_prev->m_len = clsize;
		return (ENOBUFS);
	}

	/*
	 * Move unused mbufs, if any, from the original chain
	 * onto the end of the new chain.
	 */
	if (m_prev->m_next != NULL) {
		m_tail->m_next = m_prev->m_next;
		m_prev->m_next = NULL;
	}

	error = vtnet_enqueue_rxbuf(sc, m_new);
	if (error) {
		/*
		 * BAD! We could not enqueue the replacement mbuf chain. We
		 * must restore the m0 chain to the original state if it was
		 * modified so we can subsequently discard it.
		 *
		 * NOTE: The replacement is suppose to be an identical copy
		 * to the one just dequeued so this is an unexpected error.
		 */
		sc->vtnet_stats.rx_enq_replacement_failed++;

		if (m_tail->m_next != NULL) {
			m_prev->m_next = m_tail->m_next;
			m_tail->m_next = NULL;
		}

		m_prev->m_len = clsize;
		m_freem(m_new);
	}

	return (error);
}

static int
vtnet_newbuf(struct vtnet_softc *sc)
{
	struct mbuf *m;
	int error;

	m = vtnet_alloc_rxbuf(sc, sc->vtnet_rx_mbuf_count, NULL);
	if (m == NULL)
		return (ENOBUFS);

	error = vtnet_enqueue_rxbuf(sc, m);
	if (error)
		m_freem(m);

	return (error);
}

static void
vtnet_discard_merged_rxbuf(struct vtnet_softc *sc, int nbufs)
{
	struct virtqueue *vq;
	struct mbuf *m;

	vq = sc->vtnet_rx_vq;

	while (--nbufs > 0) {
		if ((m = virtqueue_dequeue(vq, NULL)) == NULL)
			break;
		vtnet_discard_rxbuf(sc, m);
	}
}

static void
vtnet_discard_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	int error;

	/*
	 * Requeue the discarded mbuf. This should always be
	 * successful since it was just dequeued.
	 */
	error = vtnet_enqueue_rxbuf(sc, m);
	KASSERT(error == 0, ("cannot requeue discarded mbuf"));
}

static int
vtnet_enqueue_rxbuf(struct vtnet_softc *sc, struct mbuf *m)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_RX_SEGS];
	struct vtnet_rx_header *rxhdr;
	struct virtio_net_hdr *hdr;
	uint8_t *mdata;
	int offset, error;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	if ((sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0)
		KASSERT(m->m_next == NULL, ("chained Rx mbuf"));

	sglist_init(&sg, sc->vtnet_rx_nsegs, segs);

	mdata = mtod(m, uint8_t *);
	offset = 0;

	if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
		rxhdr = (struct vtnet_rx_header *) mdata;
		hdr = &rxhdr->vrh_hdr;
		offset += sizeof(struct vtnet_rx_header);

		error = sglist_append(&sg, hdr, sc->vtnet_hdr_size);
		KASSERT(error == 0, ("cannot add header to sglist"));
	}

	error = sglist_append(&sg, mdata + offset, m->m_len - offset);
	if (error)
		return (error);

	if (m->m_next != NULL) {
		error = sglist_append_mbuf(&sg, m->m_next);
		if (error)
			return (error);
	}

	return (virtqueue_enqueue(sc->vtnet_rx_vq, m, &sg, 0, sg.sg_nseg));
}

static void
vtnet_vlan_tag_remove(struct mbuf *m)
{
	struct ether_vlan_header *evl;

	evl = mtod(m, struct ether_vlan_header *);

	m->m_pkthdr.ether_vlantag = ntohs(evl->evl_tag);
	m->m_flags |= M_VLANTAG;

	/* Strip the 802.1Q header. */
	bcopy((char *) evl, (char *) evl + ETHER_VLAN_ENCAP_LEN,
	    ETHER_HDR_LEN - ETHER_TYPE_LEN);
	m_adj(m, ETHER_VLAN_ENCAP_LEN);
}

/*
 * Alternative method of doing receive checksum offloading. Rather
 * than parsing the received frame down to the IP header, use the
 * csum_offset to determine which CSUM_* flags are appropriate. We
 * can get by with doing this only because the checksum offsets are
 * unique for the things we care about.
 */
static int
vtnet_rx_csum(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct udphdr *udp;
	int csum_len;
	uint16_t eth_type;

	csum_len = hdr->csum_start + hdr->csum_offset;

	if (csum_len < sizeof(struct ether_header) + sizeof(struct ip))
		return (1);
	if (m->m_len < csum_len)
		return (1);

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	if (eth_type != ETHERTYPE_IP && eth_type != ETHERTYPE_IPV6) {
		sc->vtnet_stats.rx_csum_bad_ethtype++;
		return (1);
	}

	/* Use the offset to determine the appropriate CSUM_* flags. */
	switch (hdr->csum_offset) {
	case offsetof(struct udphdr, uh_sum):
		if (m->m_len < hdr->csum_start + sizeof(struct udphdr))
			return (1);
		udp = (struct udphdr *)(mtod(m, uint8_t *) + hdr->csum_start);
		if (udp->uh_sum == 0)
			return (0);

		/* FALLTHROUGH */

	case offsetof(struct tcphdr, th_sum):
		m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		m->m_pkthdr.csum_data = 0xFFFF;
		break;

	default:
		sc->vtnet_stats.rx_csum_bad_offset++;
		return (1);
	}

	sc->vtnet_stats.rx_csum_offloaded++;

	return (0);
}

static int
vtnet_rxeof_merged(struct vtnet_softc *sc, struct mbuf *m_head, int nbufs)
{
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m, *m_tail;
	int len;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	m_tail = m_head;

	while (--nbufs > 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL) {
			ifp->if_ierrors++;
			goto fail;
		}

		if (vtnet_newbuf(sc) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			goto fail;
		}

		if (m->m_len > len)
			m->m_len = len;

		m->m_flags &= ~M_PKTHDR;

		m_head->m_pkthdr.len += len;
		m_tail->m_next = m;
		m_tail = m;
	}

	return (0);

fail:
	sc->vtnet_stats.rx_mergeable_failed++;
	m_freem(m_head);

	return (1);
}

static int
vtnet_rxeof(struct vtnet_softc *sc, int count, int *rx_npktsp)
{
	struct virtio_net_hdr lhdr;
	struct ifnet *ifp;
	struct virtqueue *vq;
	struct mbuf *m;
	struct ether_header *eh;
	struct virtio_net_hdr *hdr;
	struct virtio_net_hdr_mrg_rxbuf *mhdr;
	int len, deq, nbufs, adjsz, rx_npkts;

	ifp = sc->vtnet_ifp;
	vq = sc->vtnet_rx_vq;
	hdr = &lhdr;
	deq = 0;
	rx_npkts = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	while (--count >= 0) {
		m = virtqueue_dequeue(vq, &len);
		if (m == NULL)
			break;
		deq++;

		if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) {
			ifp->if_ierrors++;
			vtnet_discard_rxbuf(sc, m);
			continue;
		}

		if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) {
			nbufs = 1;
			adjsz = sizeof(struct vtnet_rx_header);
			/*
			 * Account for our pad between the header and
			 * the actual start of the frame.
			 */
			len += VTNET_RX_HEADER_PAD;
		} else {
			mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *);
			nbufs = mhdr->num_buffers;
			adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf);
		}

		if (vtnet_replace_rxbuf(sc, m, len) != 0) {
			ifp->if_iqdrops++;
			vtnet_discard_rxbuf(sc, m);
			if (nbufs > 1)
				vtnet_discard_merged_rxbuf(sc, nbufs);
			continue;
		}

		m->m_pkthdr.len = len;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.csum_flags = 0;

		if (nbufs > 1) {
			if (vtnet_rxeof_merged(sc, m, nbufs) != 0)
				continue;
		}

		/*
		 * Save copy of header before we strip it. For both mergeable
		 * and non-mergeable, the VirtIO header is placed first in the
		 * mbuf's data. We no longer need num_buffers, so always use a
		 * virtio_net_hdr.
		 */
		memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr));
		m_adj(m, adjsz);

		if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
			eh = mtod(m, struct ether_header *);
			if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
				vtnet_vlan_tag_remove(m);

				/*
				 * With the 802.1Q header removed, update the
				 * checksum starting location accordingly.
				 */
				if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
					hdr->csum_start -=
					    ETHER_VLAN_ENCAP_LEN;
			}
		}

		if (ifp->if_capenable & IFCAP_RXCSUM &&
		    hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (vtnet_rx_csum(sc, m, hdr) != 0)
				sc->vtnet_stats.rx_csum_failed++;
		}

		lwkt_serialize_exit(&sc->vtnet_slz);
		rx_npkts++;
		ifp->if_input(ifp, m, NULL, -1);
		lwkt_serialize_enter(&sc->vtnet_slz);

		/*
		 * The interface may have been stopped while we were
		 * passing the packet up the network stack.
		 */
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;
	}

	if (deq > 0)
		virtqueue_notify(vq, &sc->vtnet_slz);

	if (rx_npktsp != NULL)
		*rx_npktsp = rx_npkts;

	return (count > 0 ? 0 : EAGAIN);
}

static void
vtnet_rx_intr_task(void *arg)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	int more;

	sc = arg;
	ifp = sc->vtnet_ifp;

next:
//	lwkt_serialize_enter(&sc->vtnet_slz);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		vtnet_enable_rx_intr(sc);
//		lwkt_serialize_exit(&sc->vtnet_slz);
		return;
	}

	more = vtnet_rxeof(sc, sc->vtnet_rx_process_limit, NULL);
	if (!more && vtnet_enable_rx_intr(sc) != 0) {
		vtnet_disable_rx_intr(sc);
		more = 1;
	}

//	lwkt_serialize_exit(&sc->vtnet_slz);

	if (more) {
		sc->vtnet_stats.rx_task_rescheduled++;
		goto next;
	}
}

static int
vtnet_rx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	vtnet_disable_rx_intr(sc);
	vtnet_rx_intr_task(sc);

	return (1);
}

static void
vtnet_enqueue_txhdr(struct vtnet_softc *sc, struct vtnet_tx_header *txhdr)
{
	bzero(txhdr, sizeof(*txhdr));
	SLIST_INSERT_HEAD(&sc->vtnet_txhdr_free, txhdr, link);
}

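/*
 * Reclaim completed transmissions: free each transmitted mbuf and return
 * its Tx header to the free list. Clearing OACTIVE lets the stack resume
 * queueing, and an empty ring disarms the watchdog.
 */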
static void
vtnet_txeof(struct vtnet_softc *sc)
{
	struct virtqueue *vq;
	struct ifnet *ifp;
	struct vtnet_tx_header *txhdr;
	int deq;

	vq = sc->vtnet_tx_vq;
	ifp = sc->vtnet_ifp;
	deq = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) {
		deq++;

		m_freem(txhdr->vth_mbuf);
		vtnet_enqueue_txhdr(sc, txhdr);
	}

	if (deq > 0) {
		ifq_clr_oactive(&ifp->if_snd);
		if (virtqueue_empty(vq))
			sc->vtnet_watchdog_timer = 0;
	}
}

static struct mbuf *
vtnet_tx_offload(struct vtnet_softc *sc, struct mbuf *m,
    struct virtio_net_hdr *hdr)
{
	struct ifnet *ifp;
	struct ether_header *eh;
	struct ether_vlan_header *evh;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct tcphdr *tcp;
	int ip_offset;
	uint16_t eth_type, csum_start;
	uint8_t ip_proto, gso_type;

	ifp = sc->vtnet_ifp;

	ip_offset = sizeof(struct ether_header);
	if (m->m_len < ip_offset) {
		if ((m = m_pullup(m, ip_offset)) == NULL)
			return (NULL);
	}

	eh = mtod(m, struct ether_header *);
	eth_type = ntohs(eh->ether_type);
	if (eth_type == ETHERTYPE_VLAN) {
		ip_offset = sizeof(struct ether_vlan_header);
		if (m->m_len < ip_offset) {
			if ((m = m_pullup(m, ip_offset)) == NULL)
				return (NULL);
		}
		evh = mtod(m, struct ether_vlan_header *);
		eth_type = ntohs(evh->evl_proto);
	}

	switch (eth_type) {
	case ETHERTYPE_IP:
		if (m->m_len < ip_offset + sizeof(struct ip)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip));
			if (m == NULL)
				return (NULL);
		}

		ip = (struct ip *)(mtod(m, uint8_t *) + ip_offset);
		ip_proto = ip->ip_p;
		csum_start = ip_offset + (ip->ip_hl << 2);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		break;

	case ETHERTYPE_IPV6:
		if (m->m_len < ip_offset + sizeof(struct ip6_hdr)) {
			m = m_pullup(m, ip_offset + sizeof(struct ip6_hdr));
			if (m == NULL)
				return (NULL);
		}

		ip6 = (struct ip6_hdr *)(mtod(m, uint8_t *) + ip_offset);
		/*
		 * XXX Assume no extension headers are present. Presently,
		 * this will always be true in the case of TSO, and FreeBSD
		 * does not perform checksum offloading of IPv6 yet.
		 */
		ip_proto = ip6->ip6_nxt;
		csum_start = ip_offset + sizeof(struct ip6_hdr);
		gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		break;

	default:
		return (m);
	}

	if (m->m_pkthdr.csum_flags & VTNET_CSUM_OFFLOAD) {
		hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = csum_start;
		hdr->csum_offset = m->m_pkthdr.csum_data;

		sc->vtnet_stats.tx_csum_offloaded++;
	}

	if (m->m_pkthdr.csum_flags & CSUM_TSO) {
		if (ip_proto != IPPROTO_TCP)
			return (m);

		if (m->m_len < csum_start + sizeof(struct tcphdr)) {
			m = m_pullup(m, csum_start + sizeof(struct tcphdr));
			if (m == NULL)
				return (NULL);
		}

		tcp = (struct tcphdr *)(mtod(m, uint8_t *) + csum_start);
		hdr->gso_type = gso_type;
		hdr->hdr_len = csum_start + (tcp->th_off << 2);
		hdr->gso_size = m->m_pkthdr.tso_segsz;

		if (tcp->th_flags & TH_CWR) {
			/*
			 * Drop if we did not negotiate VIRTIO_NET_F_HOST_ECN.
			 * ECN support is only configurable globally with the
			 * net.inet.tcp.ecn.enable sysctl knob.
			 */
			if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) {
				if_printf(ifp, "TSO with ECN not supported "
				    "by host\n");
				m_freem(m);
				return (NULL);
			}

			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		}

		sc->vtnet_stats.tx_tso_offloaded++;
	}

	return (m);
}

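/*
 * Build the sglist (VirtIO header first, then the frame) and enqueue it.
 * If the mbuf chain has more segments than the virtqueue allows, try once
 * to m_defrag() it into fewer clusters before giving up.
 */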
static int
vtnet_enqueue_txbuf(struct vtnet_softc *sc, struct mbuf **m_head,
    struct vtnet_tx_header *txhdr)
{
	struct sglist sg;
	struct sglist_seg segs[VTNET_MAX_TX_SEGS];
	struct virtqueue *vq;
	struct mbuf *m;
	int error;

	vq = sc->vtnet_tx_vq;
	m = *m_head;

	sglist_init(&sg, sc->vtnet_tx_nsegs, segs);
	error = sglist_append(&sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size);
	KASSERT(error == 0 && sg.sg_nseg == 1,
	    ("%s: error %d adding header to sglist", __func__, error));

	error = sglist_append_mbuf(&sg, m);
	if (error) {
		m = m_defrag(m, M_NOWAIT);
		if (m == NULL)
			goto fail;

		*m_head = m;
		sc->vtnet_stats.tx_defragged++;

		error = sglist_append_mbuf(&sg, m);
		if (error)
			goto fail;
	}

	txhdr->vth_mbuf = m;
	error = virtqueue_enqueue(vq, txhdr, &sg, sg.sg_nseg, 0);

	return (error);

fail:
	sc->vtnet_stats.tx_defrag_failed++;
	m_freem(*m_head);
	*m_head = NULL;

	return (ENOBUFS);
}

static struct mbuf *
vtnet_vlan_tag_insert(struct mbuf *m)
{
	struct mbuf *n;
	struct ether_vlan_header *evl;

	if (M_WRITABLE(m) == 0) {
		n = m_dup(m, M_NOWAIT);
		m_freem(m);
		if ((m = n) == NULL)
			return (NULL);
	}

	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
	if (m == NULL)
		return (NULL);
	if (m->m_len < sizeof(struct ether_vlan_header)) {
		m = m_pullup(m, sizeof(struct ether_vlan_header));
		if (m == NULL)
			return (NULL);
	}

	/* Insert 802.1Q header into the existing Ethernet header. */
	evl = mtod(m, struct ether_vlan_header *);
	bcopy((char *) evl + ETHER_VLAN_ENCAP_LEN,
	    (char *) evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
	evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
	evl->evl_tag = htons(m->m_pkthdr.ether_vlantag);
	m->m_flags &= ~M_VLANTAG;

	return (m);
}

static int
vtnet_encap(struct vtnet_softc *sc, struct mbuf **m_head)
{
	struct vtnet_tx_header *txhdr;
	struct virtio_net_hdr *hdr;
	struct mbuf *m;
	int error;

	txhdr = SLIST_FIRST(&sc->vtnet_txhdr_free);
	if (txhdr == NULL)
		return (ENOBUFS);
	SLIST_REMOVE_HEAD(&sc->vtnet_txhdr_free, link);

	/*
	 * Always use the non-mergeable header to simplify things. When
	 * the mergeable feature is negotiated, the num_buffers field
	 * must be set to zero. We use vtnet_hdr_size later to enqueue
	 * the correct header size to the host.
	 */
	hdr = &txhdr->vth_uhdr.hdr;
	m = *m_head;
	error = ENOBUFS;

	if (m->m_flags & M_VLANTAG) {
		//m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
		m = vtnet_vlan_tag_insert(m);
		if ((*m_head = m) == NULL)
			goto fail;
		m->m_flags &= ~M_VLANTAG;
	}

	if (m->m_pkthdr.csum_flags != 0) {
		m = vtnet_tx_offload(sc, m, hdr);
		if ((*m_head = m) == NULL)
			goto fail;
	}

	error = vtnet_enqueue_txbuf(sc, m_head, txhdr);
fail:
	if (error)
		vtnet_enqueue_txhdr(sc, txhdr);

	return (error);
}

static void
vtnet_start(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct vtnet_softc *sc;

	sc = ifp->if_softc;

	ASSERT_ALTQ_SQ_DEFAULT(ifp, ifsq);
	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_start_locked(ifp, ifsq);
	lwkt_serialize_exit(&sc->vtnet_slz);
}

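/*
 * Dequeue and encapsulate frames while ring space remains. With
 * VTNET_TX_INTR_MODERATION defined, completed transmissions are reaped
 * here (once the ring is half used) instead of from a Tx interrupt.
 */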
static void
vtnet_start_locked(struct ifnet *ifp, struct ifaltq_subque *ifsq)
{
	struct vtnet_softc *sc;
	struct virtqueue *vq;
	struct mbuf *m0;
	int enq;

	sc = ifp->if_softc;
	vq = sc->vtnet_tx_vq;
	enq = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if ((ifp->if_flags & (IFF_RUNNING)) !=
	    IFF_RUNNING || ((sc->vtnet_flags & VTNET_FLAG_LINK) == 0))
		return;

#ifdef VTNET_TX_INTR_MODERATION
	if (virtqueue_nused(vq) >= sc->vtnet_tx_size / 2)
		vtnet_txeof(sc);
#endif

	while (!ifsq_is_empty(ifsq)) {
		if (virtqueue_full(vq)) {
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		m0 = ifq_dequeue(&ifp->if_snd);
		if (m0 == NULL)
			break;

		if (vtnet_encap(sc, &m0) != 0) {
			if (m0 == NULL)
				break;
			ifq_prepend(&ifp->if_snd, m0);
			ifq_set_oactive(&ifp->if_snd);
			break;
		}

		enq++;
		ETHER_BPF_MTAP(ifp, m0);
	}

	if (enq > 0) {
		virtqueue_notify(vq, &sc->vtnet_slz);
		sc->vtnet_watchdog_timer = VTNET_WATCHDOG_TIMEOUT;
	}
}

static void
vtnet_tick(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
#ifdef VTNET_DEBUG
	virtqueue_dump(sc->vtnet_rx_vq);
	virtqueue_dump(sc->vtnet_tx_vq);
#endif

	vtnet_watchdog(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
}

static void
vtnet_tx_intr_task(void *arg)
{
	struct vtnet_softc *sc;
	struct ifnet *ifp;
	struct ifaltq_subque *ifsq;

	sc = arg;
	ifp = sc->vtnet_ifp;
	ifsq = ifq_get_subq_default(&ifp->if_snd);

next:
//	lwkt_serialize_enter(&sc->vtnet_slz);

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		vtnet_enable_tx_intr(sc);
//		lwkt_serialize_exit(&sc->vtnet_slz);
		return;
	}

	vtnet_txeof(sc);

	if (!ifsq_is_empty(ifsq))
		vtnet_start_locked(ifp, ifsq);

	if (vtnet_enable_tx_intr(sc) != 0) {
		vtnet_disable_tx_intr(sc);
		sc->vtnet_stats.tx_task_rescheduled++;
//		lwkt_serialize_exit(&sc->vtnet_slz);
		goto next;
	}

//	lwkt_serialize_exit(&sc->vtnet_slz);
}

static int
vtnet_tx_vq_intr(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	vtnet_disable_tx_intr(sc);
	vtnet_tx_intr_task(sc);

	return (1);
}

static void
vtnet_stop(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	sc->vtnet_watchdog_timer = 0;
	callout_stop(&sc->vtnet_tick_ch);
	ifq_clr_oactive(&ifp->if_snd);
	ifp->if_flags &= ~(IFF_RUNNING);

	vtnet_disable_rx_intr(sc);
	vtnet_disable_tx_intr(sc);

	/*
	 * Stop the host VirtIO adapter. Note this will reset the host
	 * adapter's state back to the pre-initialized state, so in
	 * order to make the device usable again, we must drive it
	 * through virtio_reinit() and virtio_reinit_complete().
	 */
	virtio_stop(dev);

	sc->vtnet_flags &= ~VTNET_FLAG_LINK;

	vtnet_free_rx_mbufs(sc);
	vtnet_free_tx_mbufs(sc);
}

static int
vtnet_virtio_reinit(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	uint64_t features;
	int error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;
	features = sc->vtnet_features;

	/*
	 * Re-negotiate with the host, removing any disabled receive
	 * features. Transmit features are disabled only on our side
	 * via if_capenable and if_hwassist.
	 */

	if (ifp->if_capabilities & IFCAP_RXCSUM) {
		if ((ifp->if_capenable & IFCAP_RXCSUM) == 0)
			features &= ~VIRTIO_NET_F_GUEST_CSUM;
	}

	if (ifp->if_capabilities & IFCAP_LRO) {
		if ((ifp->if_capenable & IFCAP_LRO) == 0)
			features &= ~VTNET_LRO_FEATURES;
	}

	if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) {
		if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0)
			features &= ~VIRTIO_NET_F_CTRL_VLAN;
	}

	error = virtio_reinit(dev, features);
	if (error)
		device_printf(dev, "virtio reinit error %d\n", error);

	return (error);
}

static void
vtnet_init_locked(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;
	int error;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);

	if (ifp->if_flags & IFF_RUNNING)
		return;

	/* Stop host's adapter, cancel any pending I/O. */
	vtnet_stop(sc);

	/* Reinitialize the host device. */
	error = vtnet_virtio_reinit(sc);
	if (error) {
		device_printf(dev,
		    "reinitialization failed, stopping device...\n");
		vtnet_stop(sc);
		return;
	}

	/* Update host with assigned MAC address. */
	bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN);
	vtnet_set_hwaddr(sc);

	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM)
		ifp->if_hwassist |= VTNET_CSUM_OFFLOAD;
	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;

	error = vtnet_init_rx_vq(sc);
	if (error) {
		device_printf(dev,
		    "cannot allocate mbufs for Rx virtqueue\n");
		vtnet_stop(sc);
		return;
	}

	if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
		if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) {
			/* Restore promiscuous and all-multicast modes. */
			vtnet_rx_filter(sc);

			/* Restore filtered MAC addresses. */
			vtnet_rx_filter_mac(sc);
		}

		/* Restore VLAN filters. */
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			vtnet_rx_filter_vlan(sc);
	}

	vtnet_enable_rx_intr(sc);
	vtnet_enable_tx_intr(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);

	virtio_reinit_complete(dev);

	vtnet_update_link_status(sc);
	callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc);
}

static void
vtnet_init(void *xsc)
{
	struct vtnet_softc *sc;

	sc = xsc;

	lwkt_serialize_enter(&sc->vtnet_slz);
	vtnet_init_locked(sc);
	lwkt_serialize_exit(&sc->vtnet_slz);
}

static void
vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie,
    struct sglist *sg, int readable, int writable)
{
	struct virtqueue *vq;
	void *c;

	vq = sc->vtnet_ctrl_vq;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ,
	    ("no control virtqueue"));
	KASSERT(virtqueue_empty(vq),
	    ("control command already enqueued"));

	if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0)
		return;

	virtqueue_notify(vq, &sc->vtnet_slz);

	/*
	 * Poll until the command is complete. Previously, we would
	 * sleep until the control virtqueue interrupt handler woke
	 * us up, but dropping the VTNET_MTX leads to serialization
	 * difficulties.
	 *
	 * Furthermore, it appears QEMU/KVM only allocates three MSIX
	 * vectors. Two of those vectors are needed for the Rx and Tx
	 * virtqueues. We do not support sharing both a Vq and config
	 * changed notification on the same MSIX vector.
	 */
	c = virtqueue_poll(vq, NULL);
	KASSERT(c == cookie, ("unexpected control command response"));
}

static int
vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr)
{
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		char aligned_hwaddr[ETHER_ADDR_LEN] __aligned(8);
		uint8_t pad2;
		uint8_t ack;
	} s;
	struct sglist_seg segs[3];
	struct sglist sg;
	int error;

	s.hdr.class = VIRTIO_NET_CTRL_MAC;
	s.hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET;
	s.ack = VIRTIO_NET_ERR;

	/* Copy the mac address into physically contiguous memory */
	memcpy(s.aligned_hwaddr, hwaddr, ETHER_ADDR_LEN);

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr,
	    sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, s.aligned_hwaddr, ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding set MAC msg to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}

static void
vtnet_rx_filter(struct vtnet_softc *sc)
{
	device_t dev;
	struct ifnet *ifp;

	dev = sc->vtnet_dev;
	ifp = sc->vtnet_ifp;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("CTRL_RX feature not negotiated"));

	if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0)
		device_printf(dev, "cannot %s promiscuous mode\n",
		    (ifp->if_flags & IFF_PROMISC) ? "enable" : "disable");

	if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0)
		device_printf(dev, "cannot %s all-multicast mode\n",
		    (ifp->if_flags & IFF_ALLMULTI) ? "enable" : "disable");
}

static int
vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		uint8_t onoff;
		uint8_t pad2;
		uint8_t ack;
	} s;
	int error;

	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("%s: CTRL_RX feature not negotiated", __func__));

	s.hdr.class = VIRTIO_NET_CTRL_RX;
	s.hdr.cmd = cmd;
	s.onoff = !!on;
	s.ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t));
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding Rx message to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}

static int
vtnet_set_promisc(struct vtnet_softc *sc, int on)
{
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on));
}

static int
vtnet_set_allmulti(struct vtnet_softc *sc, int on)
{
	return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on));
}

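/*
 * Program the host's unicast and multicast MAC filter tables from the
 * interface's address lists. If either table would overflow
 * VTNET_MAX_MAC_ENTRIES, fall back to promiscuous or all-multicast mode.
 */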
static void
vtnet_rx_filter_mac(struct vtnet_softc *sc)
{
	struct virtio_net_ctrl_hdr hdr __aligned(2);
	struct vtnet_mac_filter *filter;
	struct sglist_seg segs[4];
	struct sglist sg;
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct ifaddr_container *ifac;
	struct ifmultiaddr *ifma;
	int ucnt, mcnt, promisc, allmulti, error;
	uint8_t ack;

	ifp = sc->vtnet_ifp;
	ucnt = 0;
	mcnt = 0;
	promisc = 0;
	allmulti = 0;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX,
	    ("%s: CTRL_RX feature not negotiated", __func__));

	/* Use the MAC filtering table allocated in vtnet_attach. */
	filter = sc->vtnet_macfilter;
	memset(filter, 0, sizeof(struct vtnet_mac_filter));

	/* Unicast MAC addresses: */
	//if_addr_rlock(ifp);
	TAILQ_FOREACH(ifac, &ifp->if_addrheads[mycpuid], ifa_link) {
		ifa = ifac->ifa;
		if (ifa->ifa_addr->sa_family != AF_LINK)
			continue;
		else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0)
			continue;
		else if (ucnt == VTNET_MAX_MAC_ENTRIES) {
			promisc = 1;
			break;
		}

		bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr),
		    &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN);
		ucnt++;
	}
	//if_addr_runlock(ifp);

	if (promisc != 0) {
		filter->vmf_unicast.nentries = 0;
		if_printf(ifp, "more than %d MAC addresses assigned, "
		    "falling back to promiscuous mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else {
		filter->vmf_unicast.nentries = ucnt;
	}

	/* Multicast MAC addresses: */
	//if_maddr_rlock(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		else if (mcnt == VTNET_MAX_MAC_ENTRIES) {
			allmulti = 1;
			break;
		}

		bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
		    &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN);
		mcnt++;
	}
	//if_maddr_runlock(ifp);

	if (allmulti != 0) {
		filter->vmf_multicast.nentries = 0;
		if_printf(ifp, "more than %d multicast MAC addresses "
		    "assigned, falling back to all-multicast mode\n",
		    VTNET_MAX_MAC_ENTRIES);
	} else {
		filter->vmf_multicast.nentries = mcnt;
	}

	if (promisc != 0 && allmulti != 0)
		goto out;

	hdr.class = VIRTIO_NET_CTRL_MAC;
	hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
	ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 4, segs);
	error = 0;
	error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &filter->vmf_unicast,
	    sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &filter->vmf_multicast,
	    sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN);
	error |= sglist_append(&sg, &ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 4,
	    ("%s: error %d adding MAC filter msg to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1);

	if (ack != VIRTIO_NET_OK)
		if_printf(ifp, "error setting host MAC filter table\n");

out:
	if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0)
		if_printf(ifp, "cannot enable promiscuous mode\n");
	if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0)
		if_printf(ifp, "cannot enable all-multicast mode\n");
}

static int
vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct sglist_seg segs[3];
	struct sglist sg;
	struct {
		struct virtio_net_ctrl_hdr hdr __aligned(2);
		uint8_t pad1;
		uint16_t tag;
		uint8_t pad2;
		uint8_t ack;
	} s;
	int error;

	s.hdr.class = VIRTIO_NET_CTRL_VLAN;
	s.hdr.cmd = add ? VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL;
	s.tag = tag;
	s.ack = VIRTIO_NET_ERR;

	sglist_init(&sg, 3, segs);
	error = 0;
	error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr));
	error |= sglist_append(&sg, &s.tag, sizeof(uint16_t));
	error |= sglist_append(&sg, &s.ack, sizeof(uint8_t));
	KASSERT(error == 0 && sg.sg_nseg == 3,
	    ("%s: error %d adding VLAN message to sglist", __func__, error));

	vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1);

	return (s.ack == VIRTIO_NET_OK ? 0 : EIO);
}

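/*
 * Replay the shadow VLAN table into the host filter. Each set bit in
 * vtnet_vlan_shadow[] encodes one configured VLAN tag:
 * tag = 32 * word_index + bit_index.
 */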
static void
vtnet_rx_filter_vlan(struct vtnet_softc *sc)
{
	uint32_t w;
	uint16_t tag;
	int i, bit, nvlans;

	ASSERT_SERIALIZED(&sc->vtnet_slz);
	KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER,
	    ("%s: VLAN_FILTER feature not negotiated", __func__));

	nvlans = sc->vtnet_nvlans;

	/* Enable the filter for each configured VLAN. */
	for (i = 0; i < VTNET_VLAN_SHADOW_SIZE && nvlans > 0; i++) {
		w = sc->vtnet_vlan_shadow[i];
		while ((bit = ffs(w) - 1) != -1) {
			w &= ~(1 << bit);
			tag = sizeof(w) * CHAR_BIT * i + bit;
			nvlans--;

			if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) {
				device_printf(sc->vtnet_dev,
				    "cannot enable VLAN %d filter\n", tag);
			}
		}
	}

	KASSERT(nvlans == 0, ("VLAN count incorrect"));
}

static void
vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag)
{
	struct ifnet *ifp;
	int idx, bit;

	ifp = sc->vtnet_ifp;
	idx = (tag >> 5) & 0x7F;
	bit = tag & 0x1F;

	if (tag == 0 || tag > 4095)
		return;

	lwkt_serialize_enter(&sc->vtnet_slz);

	/* Update shadow VLAN table. */
	if (add) {
		sc->vtnet_nvlans++;
		sc->vtnet_vlan_shadow[idx] |= (1 << bit);
	} else {
		sc->vtnet_nvlans--;
		sc->vtnet_vlan_shadow[idx] &= ~(1 << bit);
	}

	if (ifp->if_capenable & IFCAP_VLAN_HWFILTER &&
	    vtnet_exec_vlan_filter(sc, add, tag) != 0) {
		device_printf(sc->vtnet_dev,
		    "cannot %s VLAN %d %s the host filter table\n",
		    add ? "add" : "remove", tag, add ? "to" : "from");
	}

	lwkt_serialize_exit(&sc->vtnet_slz);
}

static void
vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{
	if (ifp->if_softc != arg)
		return;

	vtnet_update_vlan_filter(arg, 1, tag);
}

static void
vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag)
{
	if (ifp->if_softc != arg)
		return;

	vtnet_update_vlan_filter(arg, 0, tag);
}

static int
vtnet_ifmedia_upd(struct ifnet *ifp)
{
	struct vtnet_softc *sc;
	struct ifmedia *ifm;

	sc = ifp->if_softc;
	ifm = &sc->vtnet_media;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	return (0);
}

static void
vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct vtnet_softc *sc;

	sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	lwkt_serialize_enter(&sc->vtnet_slz);
	if (vtnet_is_link_up(sc) != 0) {
		ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_active |= VTNET_MEDIATYPE;
	} else {
		ifmr->ifm_active |= IFM_NONE;
	}
	lwkt_serialize_exit(&sc->vtnet_slz);
}

static void
vtnet_add_statistics(struct vtnet_softc *sc)
{
	device_t dev;
	struct vtnet_statistics *stats;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	struct sysctl_oid_list *child;

	dev = sc->vtnet_dev;
	stats = &sc->vtnet_stats;
	ctx = device_get_sysctl_ctx(dev);
	tree = device_get_sysctl_tree(dev);
	child = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "mbuf_alloc_failed",
	    CTLFLAG_RD, &stats->mbuf_alloc_failed, 0,
	    "Mbuf cluster allocation failures");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large",
	    CTLFLAG_RD, &stats->rx_frame_too_large, 0,
	    "Received frame larger than the mbuf chain");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed",
	    CTLFLAG_RD, &stats->rx_enq_replacement_failed, 0,
	    "Enqueuing the replacement receive mbuf failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed",
	    CTLFLAG_RD, &stats->rx_mergeable_failed, 0,
	    "Mergeable buffers receive failures");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->rx_csum_bad_ethtype, 0,
	    "Received checksum offloaded buffer with unsupported "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto",
	    CTLFLAG_RD, &stats->rx_csum_bad_ipproto, 0,
	    "Received checksum offloaded buffer with incorrect IP protocol");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset",
	    CTLFLAG_RD, &stats->rx_csum_bad_offset, 0,
	    "Received checksum offloaded buffer with incorrect offset");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed",
	    CTLFLAG_RD, &stats->rx_csum_failed, 0,
	    "Received buffer checksum offload failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded",
	    CTLFLAG_RD, &stats->rx_csum_offloaded, 0,
	    "Received buffer checksum offload succeeded");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled",
	    CTLFLAG_RD, &stats->rx_task_rescheduled, 0,
	    "Times the receive interrupt task rescheduled itself");

	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_csum_bad_ethtype, 0,
	    "Aborted transmit of checksum offloaded buffer with unknown "
	    "Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype",
	    CTLFLAG_RD, &stats->tx_tso_bad_ethtype, 0,
	    "Aborted transmit of TSO buffer with unknown Ethernet type");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged",
	    CTLFLAG_RD, &stats->tx_defragged, 0,
	    "Transmit mbufs defragged");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed",
	    CTLFLAG_RD, &stats->tx_defrag_failed, 0,
	    "Aborted transmit of buffer because defrag failed");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded",
	    CTLFLAG_RD, &stats->tx_csum_offloaded, 0,
	    "Offloaded checksum of transmitted buffer");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded",
	    CTLFLAG_RD, &stats->tx_tso_offloaded, 0,
	    "Segmentation offload of transmitted buffer");
	SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled",
	    CTLFLAG_RD, &stats->tx_task_rescheduled, 0,
	    "Times the transmit interrupt task rescheduled itself");
}

static int
vtnet_enable_rx_intr(struct vtnet_softc *sc)
{
	return (virtqueue_enable_intr(sc->vtnet_rx_vq));
}

static void
vtnet_disable_rx_intr(struct vtnet_softc *sc)
{
	virtqueue_disable_intr(sc->vtnet_rx_vq);
}

static int
vtnet_enable_tx_intr(struct vtnet_softc *sc)
{
#ifdef VTNET_TX_INTR_MODERATION
	return (0);
#else
	return (virtqueue_enable_intr(sc->vtnet_tx_vq));
#endif
}

static void
vtnet_disable_tx_intr(struct vtnet_softc *sc)
{
	virtqueue_disable_intr(sc->vtnet_tx_vq);
}